1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved. 4 * Copyright (C) 2016-2017 Milan Broz 5 * Copyright (C) 2016-2017 Mikulas Patocka 6 * 7 * This file is released under the GPL. 8 */ 9 10 #include "dm-bio-record.h" 11 12 #include <linux/compiler.h> 13 #include <linux/module.h> 14 #include <linux/device-mapper.h> 15 #include <linux/dm-io.h> 16 #include <linux/vmalloc.h> 17 #include <linux/sort.h> 18 #include <linux/rbtree.h> 19 #include <linux/delay.h> 20 #include <linux/hex.h> 21 #include <linux/random.h> 22 #include <linux/reboot.h> 23 #include <crypto/hash.h> 24 #include <crypto/skcipher.h> 25 #include <crypto/utils.h> 26 #include <linux/async_tx.h> 27 #include <linux/dm-bufio.h> 28 29 #include "dm-audit.h" 30 31 #define DM_MSG_PREFIX "integrity" 32 33 #define DEFAULT_INTERLEAVE_SECTORS 32768 34 #define DEFAULT_JOURNAL_SIZE_FACTOR 7 35 #define DEFAULT_SECTORS_PER_BITMAP_BIT 32768 36 #define DEFAULT_BUFFER_SECTORS 128 37 #define DEFAULT_JOURNAL_WATERMARK 50 38 #define DEFAULT_SYNC_MSEC 10000 39 #define DEFAULT_MAX_JOURNAL_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192) 40 #define MIN_LOG2_INTERLEAVE_SECTORS 3 41 #define MAX_LOG2_INTERLEAVE_SECTORS 31 42 #define METADATA_WORKQUEUE_MAX_ACTIVE 16 43 #define RECALC_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048) 44 #define RECALC_WRITE_SUPER 16 45 #define BITMAP_BLOCK_SIZE 4096 /* don't change it */ 46 #define BITMAP_FLUSH_INTERVAL (10 * HZ) 47 #define DISCARD_FILLER 0xf6 48 #define SALT_SIZE 16 49 #define RECHECK_POOL_SIZE 256 50 51 /* 52 * Warning - DEBUG_PRINT prints security-sensitive data to the log, 53 * so it should not be enabled in the official kernel 54 */ 55 //#define DEBUG_PRINT 56 //#define INTERNAL_VERIFY 57 58 /* 59 * On disk structures 60 */ 61 62 #define SB_MAGIC "integrt" 63 #define SB_VERSION_1 1 64 #define SB_VERSION_2 2 65 #define SB_VERSION_3 3 66 #define SB_VERSION_4 4 67 #define SB_VERSION_5 5 68 #define SB_VERSION_6 6 69 #define SB_SECTORS 8 70 #define MAX_SECTORS_PER_BLOCK 8 71 72 struct superblock { 73 __u8 magic[8]; 74 __u8 version; 75 __u8 log2_interleave_sectors; 76 __le16 integrity_tag_size; 77 __le32 journal_sections; 78 __le64 provided_data_sectors; /* userspace uses this value */ 79 __le32 flags; 80 __u8 log2_sectors_per_block; 81 __u8 log2_blocks_per_bitmap_bit; 82 __u8 pad[2]; 83 __le64 recalc_sector; 84 __u8 pad2[8]; 85 __u8 salt[SALT_SIZE]; 86 }; 87 88 #define SB_FLAG_HAVE_JOURNAL_MAC 0x1 89 #define SB_FLAG_RECALCULATING 0x2 90 #define SB_FLAG_DIRTY_BITMAP 0x4 91 #define SB_FLAG_FIXED_PADDING 0x8 92 #define SB_FLAG_FIXED_HMAC 0x10 93 #define SB_FLAG_INLINE 0x20 94 95 #define JOURNAL_ENTRY_ROUNDUP 8 96 97 typedef __le64 commit_id_t; 98 #define JOURNAL_MAC_PER_SECTOR 8 99 100 struct journal_entry { 101 union { 102 struct { 103 __le32 sector_lo; 104 __le32 sector_hi; 105 } s; 106 __le64 sector; 107 } u; 108 commit_id_t last_bytes[]; 109 /* __u8 tag[0]; */ 110 }; 111 112 #define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block]) 113 114 #if BITS_PER_LONG == 64 115 #define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0) 116 #else 117 #define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0) 118 #endif 119 #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector) 120 #define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1)) 121 #define 
journal_entry_set_unused(je) ((je)->u.s.sector_hi = cpu_to_le32(-1)) 122 #define journal_entry_is_inprogress(je) ((je)->u.s.sector_hi == cpu_to_le32(-2)) 123 #define journal_entry_set_inprogress(je) ((je)->u.s.sector_hi = cpu_to_le32(-2)) 124 125 #define JOURNAL_BLOCK_SECTORS 8 126 #define JOURNAL_SECTOR_DATA ((1 << SECTOR_SHIFT) - sizeof(commit_id_t)) 127 #define JOURNAL_MAC_SIZE (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS) 128 129 struct journal_sector { 130 struct_group(sectors, 131 __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR]; 132 __u8 mac[JOURNAL_MAC_PER_SECTOR]; 133 ); 134 commit_id_t commit_id; 135 }; 136 137 #define MAX_TAG_SIZE 255 138 139 #define METADATA_PADDING_SECTORS 8 140 141 #define N_COMMIT_IDS 4 142 143 static unsigned char prev_commit_seq(unsigned char seq) 144 { 145 return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS; 146 } 147 148 static unsigned char next_commit_seq(unsigned char seq) 149 { 150 return (seq + 1) % N_COMMIT_IDS; 151 } 152 153 /* 154 * In-memory structures 155 */ 156 157 struct journal_node { 158 struct rb_node node; 159 sector_t sector; 160 }; 161 162 struct alg_spec { 163 char *alg_string; 164 char *key_string; 165 __u8 *key; 166 unsigned int key_size; 167 }; 168 169 struct dm_integrity_c { 170 struct dm_dev *dev; 171 struct dm_dev *meta_dev; 172 unsigned int tag_size; 173 __s8 log2_tag_size; 174 unsigned int tuple_size; 175 sector_t start; 176 mempool_t journal_io_mempool; 177 struct dm_io_client *io; 178 struct dm_bufio_client *bufio; 179 struct workqueue_struct *metadata_wq; 180 struct superblock *sb; 181 unsigned int journal_pages; 182 unsigned int n_bitmap_blocks; 183 184 struct page_list *journal; 185 struct page_list *journal_io; 186 struct page_list *journal_xor; 187 struct page_list *recalc_bitmap; 188 struct page_list *may_write_bitmap; 189 struct bitmap_block_status *bbs; 190 unsigned int bitmap_flush_interval; 191 int synchronous_mode; 192 struct bio_list synchronous_bios; 193 struct delayed_work bitmap_flush_work; 194 195 struct crypto_skcipher *journal_crypt; 196 struct scatterlist **journal_scatterlist; 197 struct scatterlist **journal_io_scatterlist; 198 struct skcipher_request **sk_requests; 199 200 struct crypto_shash *journal_mac; 201 202 struct journal_node *journal_tree; 203 struct rb_root journal_tree_root; 204 205 sector_t provided_data_sectors; 206 207 unsigned short journal_entry_size; 208 unsigned char journal_entries_per_sector; 209 unsigned char journal_section_entries; 210 unsigned short journal_section_sectors; 211 unsigned int journal_sections; 212 unsigned int journal_entries; 213 sector_t data_device_sectors; 214 sector_t meta_device_sectors; 215 unsigned int initial_sectors; 216 unsigned int metadata_run; 217 __s8 log2_metadata_run; 218 __u8 log2_buffer_sectors; 219 __u8 sectors_per_block; 220 __u8 log2_blocks_per_bitmap_bit; 221 222 unsigned char mode; 223 bool internal_hash; 224 225 int failed; 226 227 struct crypto_shash *internal_shash; 228 struct crypto_ahash *internal_ahash; 229 unsigned int internal_hash_digestsize; 230 231 struct dm_target *ti; 232 233 /* these variables are locked with endio_wait.lock */ 234 struct rb_root in_progress; 235 struct list_head wait_list; 236 wait_queue_head_t endio_wait; 237 struct workqueue_struct *wait_wq; 238 struct workqueue_struct *offload_wq; 239 240 unsigned char commit_seq; 241 commit_id_t commit_ids[N_COMMIT_IDS]; 242 243 unsigned int committed_section; 244 unsigned int n_committed_sections; 245 246 unsigned int uncommitted_section; 247 unsigned int 
n_uncommitted_sections; 248 249 unsigned int free_section; 250 unsigned char free_section_entry; 251 unsigned int free_sectors; 252 253 unsigned int free_sectors_threshold; 254 255 struct workqueue_struct *commit_wq; 256 struct work_struct commit_work; 257 258 struct workqueue_struct *writer_wq; 259 struct work_struct writer_work; 260 261 struct workqueue_struct *recalc_wq; 262 struct work_struct recalc_work; 263 264 struct bio_list flush_bio_list; 265 266 unsigned long autocommit_jiffies; 267 struct timer_list autocommit_timer; 268 unsigned int autocommit_msec; 269 270 wait_queue_head_t copy_to_journal_wait; 271 272 struct completion crypto_backoff; 273 274 bool wrote_to_journal; 275 bool journal_uptodate; 276 bool just_formatted; 277 bool recalculate_flag; 278 bool reset_recalculate_flag; 279 bool discard; 280 bool fix_padding; 281 bool fix_hmac; 282 bool legacy_recalculate; 283 284 mempool_t ahash_req_pool; 285 struct ahash_request *journal_ahash_req; 286 287 struct alg_spec internal_hash_alg; 288 struct alg_spec journal_crypt_alg; 289 struct alg_spec journal_mac_alg; 290 291 atomic64_t number_of_mismatches; 292 293 mempool_t recheck_pool; 294 struct bio_set recheck_bios; 295 struct bio_set recalc_bios; 296 297 struct notifier_block reboot_notifier; 298 }; 299 300 struct dm_integrity_range { 301 sector_t logical_sector; 302 sector_t n_sectors; 303 bool waiting; 304 union { 305 struct rb_node node; 306 struct { 307 struct task_struct *task; 308 struct list_head wait_entry; 309 }; 310 }; 311 }; 312 313 struct dm_integrity_io { 314 struct work_struct work; 315 316 struct dm_integrity_c *ic; 317 enum req_op op; 318 bool fua; 319 320 struct dm_integrity_range range; 321 322 sector_t metadata_block; 323 unsigned int metadata_offset; 324 325 atomic_t in_flight; 326 blk_status_t bi_status; 327 328 struct completion *completion; 329 330 struct dm_bio_details bio_details; 331 332 char *integrity_payload; 333 unsigned payload_len; 334 bool integrity_payload_from_mempool; 335 bool integrity_range_locked; 336 337 struct ahash_request *ahash_req; 338 }; 339 340 struct journal_completion { 341 struct dm_integrity_c *ic; 342 atomic_t in_flight; 343 struct completion comp; 344 }; 345 346 struct journal_io { 347 struct dm_integrity_range range; 348 struct journal_completion *comp; 349 }; 350 351 struct bitmap_block_status { 352 struct work_struct work; 353 struct dm_integrity_c *ic; 354 unsigned int idx; 355 unsigned long *bitmap; 356 struct bio_list bio_queue; 357 spinlock_t bio_queue_lock; 358 359 }; 360 361 static struct kmem_cache *journal_io_cache; 362 363 #define JOURNAL_IO_MEMPOOL 32 364 #define AHASH_MEMPOOL 32 365 366 #ifdef DEBUG_PRINT 367 #define DEBUG_print(x, ...) printk(KERN_DEBUG x, ##__VA_ARGS__) 368 #define DEBUG_bytes(bytes, len, msg, ...) printk(KERN_DEBUG msg "%s%*ph\n", ##__VA_ARGS__, \ 369 len ? ": " : "", len, bytes) 370 #else 371 #define DEBUG_print(x, ...) do { } while (0) 372 #define DEBUG_bytes(bytes, len, msg, ...) 
do { } while (0) 373 #endif 374 375 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map); 376 static int dm_integrity_map_inline(struct dm_integrity_io *dio, bool from_map); 377 static void integrity_bio_wait(struct work_struct *w); 378 static void dm_integrity_dtr(struct dm_target *ti); 379 380 static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err) 381 { 382 if (err == -EILSEQ) 383 atomic64_inc(&ic->number_of_mismatches); 384 if (!cmpxchg(&ic->failed, 0, err)) 385 DMERR("Error on %s: %d", msg, err); 386 } 387 388 static int dm_integrity_failed(struct dm_integrity_c *ic) 389 { 390 return READ_ONCE(ic->failed); 391 } 392 393 static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic) 394 { 395 if (ic->legacy_recalculate) 396 return false; 397 if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ? 398 ic->internal_hash_alg.key || ic->journal_mac_alg.key : 399 ic->internal_hash_alg.key && !ic->journal_mac_alg.key) 400 return true; 401 return false; 402 } 403 404 static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned int i, 405 unsigned int j, unsigned char seq) 406 { 407 /* 408 * Xor the number with section and sector, so that if a piece of 409 * journal is written at wrong place, it is detected. 410 */ 411 return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j); 412 } 413 414 static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector, 415 sector_t *area, sector_t *offset) 416 { 417 if (!ic->meta_dev) { 418 __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors; 419 *area = data_sector >> log2_interleave_sectors; 420 *offset = (unsigned int)data_sector & ((1U << log2_interleave_sectors) - 1); 421 } else { 422 *area = 0; 423 *offset = data_sector; 424 } 425 } 426 427 #define sector_to_block(ic, n) \ 428 do { \ 429 BUG_ON((n) & (unsigned int)((ic)->sectors_per_block - 1)); \ 430 (n) >>= (ic)->sb->log2_sectors_per_block; \ 431 } while (0) 432 433 static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area, 434 sector_t offset, unsigned int *metadata_offset) 435 { 436 __u64 ms; 437 unsigned int mo; 438 439 ms = area << ic->sb->log2_interleave_sectors; 440 if (likely(ic->log2_metadata_run >= 0)) 441 ms += area << ic->log2_metadata_run; 442 else 443 ms += area * ic->metadata_run; 444 ms >>= ic->log2_buffer_sectors; 445 446 sector_to_block(ic, offset); 447 448 if (likely(ic->log2_tag_size >= 0)) { 449 ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size); 450 mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); 451 } else { 452 ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors); 453 mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); 454 } 455 *metadata_offset = mo; 456 return ms; 457 } 458 459 static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset) 460 { 461 sector_t result; 462 463 if (ic->meta_dev) 464 return offset; 465 466 result = area << ic->sb->log2_interleave_sectors; 467 if (likely(ic->log2_metadata_run >= 0)) 468 result += (area + 1) << ic->log2_metadata_run; 469 else 470 result += (area + 1) * ic->metadata_run; 471 472 result += (sector_t)ic->initial_sectors + offset; 473 result += ic->start; 474 475 return result; 476 } 477 478 static void wraparound_section(struct dm_integrity_c *ic, unsigned int *sec_ptr) 479 { 480 if (unlikely(*sec_ptr >= ic->journal_sections)) 481 
*sec_ptr -= ic->journal_sections; 482 } 483 484 static void sb_set_version(struct dm_integrity_c *ic) 485 { 486 if (ic->sb->flags & cpu_to_le32(SB_FLAG_INLINE)) 487 ic->sb->version = SB_VERSION_6; 488 else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) 489 ic->sb->version = SB_VERSION_5; 490 else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) 491 ic->sb->version = SB_VERSION_4; 492 else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) 493 ic->sb->version = SB_VERSION_3; 494 else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) 495 ic->sb->version = SB_VERSION_2; 496 else 497 ic->sb->version = SB_VERSION_1; 498 } 499 500 static int sb_mac(struct dm_integrity_c *ic, bool wr) 501 { 502 SHASH_DESC_ON_STACK(desc, ic->journal_mac); 503 int r; 504 unsigned int mac_size = crypto_shash_digestsize(ic->journal_mac); 505 __u8 *sb = (__u8 *)ic->sb; 506 __u8 *mac = sb + (1 << SECTOR_SHIFT) - mac_size; 507 508 if (sizeof(struct superblock) + mac_size > 1 << SECTOR_SHIFT || 509 mac_size > HASH_MAX_DIGESTSIZE) { 510 dm_integrity_io_error(ic, "digest is too long", -EINVAL); 511 return -EINVAL; 512 } 513 514 desc->tfm = ic->journal_mac; 515 516 if (likely(wr)) { 517 r = crypto_shash_digest(desc, sb, mac - sb, mac); 518 if (unlikely(r < 0)) { 519 dm_integrity_io_error(ic, "crypto_shash_digest", r); 520 return r; 521 } 522 } else { 523 __u8 actual_mac[HASH_MAX_DIGESTSIZE]; 524 525 r = crypto_shash_digest(desc, sb, mac - sb, actual_mac); 526 if (unlikely(r < 0)) { 527 dm_integrity_io_error(ic, "crypto_shash_digest", r); 528 return r; 529 } 530 if (crypto_memneq(mac, actual_mac, mac_size)) { 531 dm_integrity_io_error(ic, "superblock mac", -EILSEQ); 532 dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0); 533 return -EILSEQ; 534 } 535 } 536 537 return 0; 538 } 539 540 static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf) 541 { 542 struct dm_io_request io_req; 543 struct dm_io_region io_loc; 544 const enum req_op op = opf & REQ_OP_MASK; 545 int r; 546 547 io_req.bi_opf = opf; 548 io_req.mem.type = DM_IO_KMEM; 549 io_req.mem.ptr.addr = ic->sb; 550 io_req.notify.fn = NULL; 551 io_req.client = ic->io; 552 io_loc.bdev = ic->meta_dev ? 
ic->meta_dev->bdev : ic->dev->bdev; 553 io_loc.sector = ic->start; 554 io_loc.count = SB_SECTORS; 555 556 if (op == REQ_OP_WRITE) { 557 sb_set_version(ic); 558 if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { 559 r = sb_mac(ic, true); 560 if (unlikely(r)) 561 return r; 562 } 563 } 564 565 r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT); 566 if (unlikely(r)) 567 return r; 568 569 if (op == REQ_OP_READ) { 570 if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { 571 r = sb_mac(ic, false); 572 if (unlikely(r)) 573 return r; 574 } 575 } 576 577 return 0; 578 } 579 580 #define BITMAP_OP_TEST_ALL_SET 0 581 #define BITMAP_OP_TEST_ALL_CLEAR 1 582 #define BITMAP_OP_SET 2 583 #define BITMAP_OP_CLEAR 3 584 585 static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap, 586 sector_t sector, sector_t n_sectors, int mode) 587 { 588 unsigned long bit, end_bit, this_end_bit, page, end_page; 589 unsigned long *data; 590 591 if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) { 592 DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)", 593 sector, 594 n_sectors, 595 ic->sb->log2_sectors_per_block, 596 ic->log2_blocks_per_bitmap_bit, 597 mode); 598 BUG(); 599 } 600 601 if (unlikely(!n_sectors)) 602 return true; 603 604 bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); 605 end_bit = (sector + n_sectors - 1) >> 606 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); 607 608 page = bit / (PAGE_SIZE * 8); 609 bit %= PAGE_SIZE * 8; 610 611 end_page = end_bit / (PAGE_SIZE * 8); 612 end_bit %= PAGE_SIZE * 8; 613 614 repeat: 615 if (page < end_page) 616 this_end_bit = PAGE_SIZE * 8 - 1; 617 else 618 this_end_bit = end_bit; 619 620 data = lowmem_page_address(bitmap[page].page); 621 622 if (mode == BITMAP_OP_TEST_ALL_SET) { 623 while (bit <= this_end_bit) { 624 if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { 625 do { 626 if (data[bit / BITS_PER_LONG] != -1) 627 return false; 628 bit += BITS_PER_LONG; 629 } while (this_end_bit >= bit + BITS_PER_LONG - 1); 630 continue; 631 } 632 if (!test_bit(bit, data)) 633 return false; 634 bit++; 635 } 636 } else if (mode == BITMAP_OP_TEST_ALL_CLEAR) { 637 while (bit <= this_end_bit) { 638 if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { 639 do { 640 if (data[bit / BITS_PER_LONG] != 0) 641 return false; 642 bit += BITS_PER_LONG; 643 } while (this_end_bit >= bit + BITS_PER_LONG - 1); 644 continue; 645 } 646 if (test_bit(bit, data)) 647 return false; 648 bit++; 649 } 650 } else if (mode == BITMAP_OP_SET) { 651 while (bit <= this_end_bit) { 652 if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { 653 do { 654 data[bit / BITS_PER_LONG] = -1; 655 bit += BITS_PER_LONG; 656 } while (this_end_bit >= bit + BITS_PER_LONG - 1); 657 continue; 658 } 659 __set_bit(bit, data); 660 bit++; 661 } 662 } else if (mode == BITMAP_OP_CLEAR) { 663 if (!bit && this_end_bit == PAGE_SIZE * 8 - 1) 664 clear_page(data); 665 else { 666 while (bit <= this_end_bit) { 667 if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { 668 do { 669 data[bit / BITS_PER_LONG] = 0; 670 bit += BITS_PER_LONG; 671 } while (this_end_bit >= bit + BITS_PER_LONG - 1); 672 continue; 673 } 674 __clear_bit(bit, data); 675 bit++; 676 } 677 } 678 } else { 679 BUG(); 680 } 681 682 if (unlikely(page < end_page)) { 683 bit = 0; 684 page++; 685 goto repeat; 686 } 687 688 return true; 689 
} 690 691 static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src) 692 { 693 unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE); 694 unsigned int i; 695 696 for (i = 0; i < n_bitmap_pages; i++) { 697 unsigned long *dst_data = lowmem_page_address(dst[i].page); 698 unsigned long *src_data = lowmem_page_address(src[i].page); 699 700 copy_page(dst_data, src_data); 701 } 702 } 703 704 static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector) 705 { 706 unsigned int bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); 707 unsigned int bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8); 708 709 BUG_ON(bitmap_block >= ic->n_bitmap_blocks); 710 return &ic->bbs[bitmap_block]; 711 } 712 713 static void access_journal_check(struct dm_integrity_c *ic, unsigned int section, unsigned int offset, 714 bool e, const char *function) 715 { 716 #if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY) 717 unsigned int limit = e ? ic->journal_section_entries : ic->journal_section_sectors; 718 719 if (unlikely(section >= ic->journal_sections) || 720 unlikely(offset >= limit)) { 721 DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)", 722 function, section, offset, ic->journal_sections, limit); 723 BUG(); 724 } 725 #endif 726 } 727 728 static void page_list_location(struct dm_integrity_c *ic, unsigned int section, unsigned int offset, 729 unsigned int *pl_index, unsigned int *pl_offset) 730 { 731 unsigned int sector; 732 733 access_journal_check(ic, section, offset, false, "page_list_location"); 734 735 sector = section * ic->journal_section_sectors + offset; 736 737 *pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); 738 *pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); 739 } 740 741 static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl, 742 unsigned int section, unsigned int offset, unsigned int *n_sectors) 743 { 744 unsigned int pl_index, pl_offset; 745 char *va; 746 747 page_list_location(ic, section, offset, &pl_index, &pl_offset); 748 749 if (n_sectors) 750 *n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT; 751 752 va = lowmem_page_address(pl[pl_index].page); 753 754 return (struct journal_sector *)(va + pl_offset); 755 } 756 757 static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset) 758 { 759 return access_page_list(ic, ic->journal, section, offset, NULL); 760 } 761 762 static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned int section, unsigned int n) 763 { 764 unsigned int rel_sector, offset; 765 struct journal_sector *js; 766 767 access_journal_check(ic, section, n, true, "access_journal_entry"); 768 769 rel_sector = n % JOURNAL_BLOCK_SECTORS; 770 offset = n / JOURNAL_BLOCK_SECTORS; 771 772 js = access_journal(ic, section, rel_sector); 773 return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size); 774 } 775 776 static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned int section, unsigned int n) 777 { 778 n <<= ic->sb->log2_sectors_per_block; 779 780 n += JOURNAL_BLOCK_SECTORS; 781 782 access_journal_check(ic, section, n, false, "access_journal_data"); 783 784 return access_journal(ic, section, n); 785 } 786 787 static void section_mac(struct dm_integrity_c *ic, unsigned int section, __u8 result[JOURNAL_MAC_SIZE]) 788 { 789 SHASH_DESC_ON_STACK(desc, 
ic->journal_mac); 790 int r; 791 unsigned int j, size; 792 793 desc->tfm = ic->journal_mac; 794 795 r = crypto_shash_init(desc); 796 if (unlikely(r < 0)) { 797 dm_integrity_io_error(ic, "crypto_shash_init", r); 798 goto err; 799 } 800 801 if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { 802 __le64 section_le; 803 804 r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE); 805 if (unlikely(r < 0)) { 806 dm_integrity_io_error(ic, "crypto_shash_update", r); 807 goto err; 808 } 809 810 section_le = cpu_to_le64(section); 811 r = crypto_shash_update(desc, (__u8 *)§ion_le, sizeof(section_le)); 812 if (unlikely(r < 0)) { 813 dm_integrity_io_error(ic, "crypto_shash_update", r); 814 goto err; 815 } 816 } 817 818 for (j = 0; j < ic->journal_section_entries; j++) { 819 struct journal_entry *je = access_journal_entry(ic, section, j); 820 821 r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof(je->u.sector)); 822 if (unlikely(r < 0)) { 823 dm_integrity_io_error(ic, "crypto_shash_update", r); 824 goto err; 825 } 826 } 827 828 size = crypto_shash_digestsize(ic->journal_mac); 829 830 if (likely(size <= JOURNAL_MAC_SIZE)) { 831 r = crypto_shash_final(desc, result); 832 if (unlikely(r < 0)) { 833 dm_integrity_io_error(ic, "crypto_shash_final", r); 834 goto err; 835 } 836 memset(result + size, 0, JOURNAL_MAC_SIZE - size); 837 } else { 838 __u8 digest[HASH_MAX_DIGESTSIZE]; 839 840 if (WARN_ON(size > sizeof(digest))) { 841 dm_integrity_io_error(ic, "digest_size", -EINVAL); 842 goto err; 843 } 844 r = crypto_shash_final(desc, digest); 845 if (unlikely(r < 0)) { 846 dm_integrity_io_error(ic, "crypto_shash_final", r); 847 goto err; 848 } 849 memcpy(result, digest, JOURNAL_MAC_SIZE); 850 } 851 852 return; 853 err: 854 memset(result, 0, JOURNAL_MAC_SIZE); 855 } 856 857 static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool wr) 858 { 859 __u8 result[JOURNAL_MAC_SIZE]; 860 unsigned int j; 861 862 if (!ic->journal_mac) 863 return; 864 865 section_mac(ic, section, result); 866 867 for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) { 868 struct journal_sector *js = access_journal(ic, section, j); 869 870 if (likely(wr)) 871 memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR); 872 else { 873 if (crypto_memneq(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) { 874 dm_integrity_io_error(ic, "journal mac", -EILSEQ); 875 dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0); 876 } 877 } 878 } 879 } 880 881 static void complete_journal_op(void *context) 882 { 883 struct journal_completion *comp = context; 884 885 BUG_ON(!atomic_read(&comp->in_flight)); 886 if (likely(atomic_dec_and_test(&comp->in_flight))) 887 complete(&comp->comp); 888 } 889 890 static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section, 891 unsigned int n_sections, struct journal_completion *comp) 892 { 893 struct async_submit_ctl submit; 894 size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT; 895 unsigned int pl_index, pl_offset, section_index; 896 struct page_list *source_pl, *target_pl; 897 898 if (likely(encrypt)) { 899 source_pl = ic->journal; 900 target_pl = ic->journal_io; 901 } else { 902 source_pl = ic->journal_io; 903 target_pl = ic->journal; 904 } 905 906 page_list_location(ic, section, 0, &pl_index, &pl_offset); 907 908 atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight); 909 910 init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, 
comp, NULL); 911 912 section_index = pl_index; 913 914 do { 915 size_t this_step; 916 struct page *src_pages[2]; 917 struct page *dst_page; 918 919 while (unlikely(pl_index == section_index)) { 920 unsigned int dummy; 921 922 if (likely(encrypt)) 923 rw_section_mac(ic, section, true); 924 section++; 925 n_sections--; 926 if (!n_sections) 927 break; 928 page_list_location(ic, section, 0, §ion_index, &dummy); 929 } 930 931 this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset); 932 dst_page = target_pl[pl_index].page; 933 src_pages[0] = source_pl[pl_index].page; 934 src_pages[1] = ic->journal_xor[pl_index].page; 935 936 async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit); 937 938 pl_index++; 939 pl_offset = 0; 940 n_bytes -= this_step; 941 } while (n_bytes); 942 943 BUG_ON(n_sections); 944 945 async_tx_issue_pending_all(); 946 } 947 948 static void complete_journal_encrypt(void *data, int err) 949 { 950 struct journal_completion *comp = data; 951 952 if (unlikely(err)) { 953 if (likely(err == -EINPROGRESS)) { 954 complete(&comp->ic->crypto_backoff); 955 return; 956 } 957 dm_integrity_io_error(comp->ic, "asynchronous encrypt", err); 958 } 959 complete_journal_op(comp); 960 } 961 962 static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp) 963 { 964 int r; 965 966 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 967 complete_journal_encrypt, comp); 968 if (likely(encrypt)) 969 r = crypto_skcipher_encrypt(req); 970 else 971 r = crypto_skcipher_decrypt(req); 972 if (likely(!r)) 973 return false; 974 if (likely(r == -EINPROGRESS)) 975 return true; 976 if (likely(r == -EBUSY)) { 977 wait_for_completion(&comp->ic->crypto_backoff); 978 reinit_completion(&comp->ic->crypto_backoff); 979 return true; 980 } 981 dm_integrity_io_error(comp->ic, "encrypt", r); 982 return false; 983 } 984 985 static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section, 986 unsigned int n_sections, struct journal_completion *comp) 987 { 988 struct scatterlist **source_sg; 989 struct scatterlist **target_sg; 990 991 atomic_add(2, &comp->in_flight); 992 993 if (likely(encrypt)) { 994 source_sg = ic->journal_scatterlist; 995 target_sg = ic->journal_io_scatterlist; 996 } else { 997 source_sg = ic->journal_io_scatterlist; 998 target_sg = ic->journal_scatterlist; 999 } 1000 1001 do { 1002 struct skcipher_request *req; 1003 unsigned int ivsize; 1004 char *iv; 1005 1006 if (likely(encrypt)) 1007 rw_section_mac(ic, section, true); 1008 1009 req = ic->sk_requests[section]; 1010 ivsize = crypto_skcipher_ivsize(ic->journal_crypt); 1011 iv = req->iv; 1012 1013 memcpy(iv, iv + ivsize, ivsize); 1014 1015 req->src = source_sg[section]; 1016 req->dst = target_sg[section]; 1017 1018 if (unlikely(do_crypt(encrypt, req, comp))) 1019 atomic_inc(&comp->in_flight); 1020 1021 section++; 1022 n_sections--; 1023 } while (n_sections); 1024 1025 atomic_dec(&comp->in_flight); 1026 complete_journal_op(comp); 1027 } 1028 1029 static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section, 1030 unsigned int n_sections, struct journal_completion *comp) 1031 { 1032 if (ic->journal_xor) 1033 return xor_journal(ic, encrypt, section, n_sections, comp); 1034 else 1035 return crypt_journal(ic, encrypt, section, n_sections, comp); 1036 } 1037 1038 static void complete_journal_io(unsigned long error, void *context) 1039 { 1040 struct journal_completion *comp = context; 1041 1042 if (unlikely(error != 0)) 1043 dm_integrity_io_error(comp->ic, 
"writing journal", -EIO); 1044 complete_journal_op(comp); 1045 } 1046 1047 static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf, 1048 unsigned int sector, unsigned int n_sectors, 1049 struct journal_completion *comp) 1050 { 1051 struct dm_io_request io_req; 1052 struct dm_io_region io_loc; 1053 unsigned int pl_index, pl_offset; 1054 int r; 1055 1056 if (unlikely(dm_integrity_failed(ic))) { 1057 if (comp) 1058 complete_journal_io(-1UL, comp); 1059 return; 1060 } 1061 1062 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); 1063 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); 1064 1065 io_req.bi_opf = opf; 1066 io_req.mem.type = DM_IO_PAGE_LIST; 1067 if (ic->journal_io) 1068 io_req.mem.ptr.pl = &ic->journal_io[pl_index]; 1069 else 1070 io_req.mem.ptr.pl = &ic->journal[pl_index]; 1071 io_req.mem.offset = pl_offset; 1072 if (likely(comp != NULL)) { 1073 io_req.notify.fn = complete_journal_io; 1074 io_req.notify.context = comp; 1075 } else { 1076 io_req.notify.fn = NULL; 1077 } 1078 io_req.client = ic->io; 1079 io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev; 1080 io_loc.sector = ic->start + SB_SECTORS + sector; 1081 io_loc.count = n_sectors; 1082 1083 r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT); 1084 if (unlikely(r)) { 1085 dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ? 1086 "reading journal" : "writing journal", r); 1087 if (comp) { 1088 WARN_ONCE(1, "asynchronous dm_io failed: %d", r); 1089 complete_journal_io(-1UL, comp); 1090 } 1091 } 1092 } 1093 1094 static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf, 1095 unsigned int section, unsigned int n_sections, 1096 struct journal_completion *comp) 1097 { 1098 unsigned int sector, n_sectors; 1099 1100 sector = section * ic->journal_section_sectors; 1101 n_sectors = n_sections * ic->journal_section_sectors; 1102 1103 rw_journal_sectors(ic, opf, sector, n_sectors, comp); 1104 } 1105 1106 static void write_journal(struct dm_integrity_c *ic, unsigned int commit_start, unsigned int commit_sections) 1107 { 1108 struct journal_completion io_comp; 1109 struct journal_completion crypt_comp_1; 1110 struct journal_completion crypt_comp_2; 1111 unsigned int i; 1112 1113 io_comp.ic = ic; 1114 init_completion(&io_comp.comp); 1115 1116 if (commit_start + commit_sections <= ic->journal_sections) { 1117 io_comp.in_flight = (atomic_t)ATOMIC_INIT(1); 1118 if (ic->journal_io) { 1119 crypt_comp_1.ic = ic; 1120 init_completion(&crypt_comp_1.comp); 1121 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 1122 encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1); 1123 wait_for_completion_io(&crypt_comp_1.comp); 1124 } else { 1125 for (i = 0; i < commit_sections; i++) 1126 rw_section_mac(ic, commit_start + i, true); 1127 } 1128 rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start, 1129 commit_sections, &io_comp); 1130 } else { 1131 unsigned int to_end; 1132 1133 io_comp.in_flight = (atomic_t)ATOMIC_INIT(2); 1134 to_end = ic->journal_sections - commit_start; 1135 if (ic->journal_io) { 1136 crypt_comp_1.ic = ic; 1137 init_completion(&crypt_comp_1.comp); 1138 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 1139 encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1); 1140 if (try_wait_for_completion(&crypt_comp_1.comp)) { 1141 rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 1142 commit_start, to_end, &io_comp); 1143 reinit_completion(&crypt_comp_1.comp); 1144 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 1145 encrypt_journal(ic, true, 0, commit_sections - to_end, 
&crypt_comp_1); 1146 wait_for_completion_io(&crypt_comp_1.comp); 1147 } else { 1148 crypt_comp_2.ic = ic; 1149 init_completion(&crypt_comp_2.comp); 1150 crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0); 1151 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2); 1152 wait_for_completion_io(&crypt_comp_1.comp); 1153 rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp); 1154 wait_for_completion_io(&crypt_comp_2.comp); 1155 } 1156 } else { 1157 for (i = 0; i < to_end; i++) 1158 rw_section_mac(ic, commit_start + i, true); 1159 rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp); 1160 for (i = 0; i < commit_sections - to_end; i++) 1161 rw_section_mac(ic, i, true); 1162 } 1163 rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 0, commit_sections - to_end, &io_comp); 1164 } 1165 1166 wait_for_completion_io(&io_comp.comp); 1167 } 1168 1169 static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset, 1170 unsigned int n_sectors, sector_t target, io_notify_fn fn, void *data) 1171 { 1172 struct dm_io_request io_req; 1173 struct dm_io_region io_loc; 1174 int r; 1175 unsigned int sector, pl_index, pl_offset; 1176 1177 BUG_ON((target | n_sectors | offset) & (unsigned int)(ic->sectors_per_block - 1)); 1178 1179 if (unlikely(dm_integrity_failed(ic))) { 1180 fn(-1UL, data); 1181 return; 1182 } 1183 1184 sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset; 1185 1186 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); 1187 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); 1188 1189 io_req.bi_opf = REQ_OP_WRITE; 1190 io_req.mem.type = DM_IO_PAGE_LIST; 1191 io_req.mem.ptr.pl = &ic->journal[pl_index]; 1192 io_req.mem.offset = pl_offset; 1193 io_req.notify.fn = fn; 1194 io_req.notify.context = data; 1195 io_req.client = ic->io; 1196 io_loc.bdev = ic->dev->bdev; 1197 io_loc.sector = target; 1198 io_loc.count = n_sectors; 1199 1200 r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT); 1201 if (unlikely(r)) { 1202 WARN_ONCE(1, "asynchronous dm_io failed: %d", r); 1203 fn(-1UL, data); 1204 } 1205 } 1206 1207 static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2) 1208 { 1209 return range1->logical_sector < range2->logical_sector + range2->n_sectors && 1210 range1->logical_sector + range1->n_sectors > range2->logical_sector; 1211 } 1212 1213 static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting) 1214 { 1215 struct rb_node **n = &ic->in_progress.rb_node; 1216 struct rb_node *parent; 1217 1218 BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned int)(ic->sectors_per_block - 1)); 1219 1220 if (likely(check_waiting)) { 1221 struct dm_integrity_range *range; 1222 1223 list_for_each_entry(range, &ic->wait_list, wait_entry) { 1224 if (unlikely(ranges_overlap(range, new_range))) 1225 return false; 1226 } 1227 } 1228 1229 parent = NULL; 1230 1231 while (*n) { 1232 struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node); 1233 1234 parent = *n; 1235 if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) 1236 n = &range->node.rb_left; 1237 else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) 1238 n = &range->node.rb_right; 1239 else 1240 return false; 1241 } 1242 1243 rb_link_node(&new_range->node, parent, n); 1244 rb_insert_color(&new_range->node, &ic->in_progress); 1245 1246 return true; 1247 } 1248 1249 static void 
remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range) 1250 { 1251 rb_erase(&range->node, &ic->in_progress); 1252 while (unlikely(!list_empty(&ic->wait_list))) { 1253 struct dm_integrity_range *last_range = 1254 list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry); 1255 struct task_struct *last_range_task; 1256 1257 last_range_task = last_range->task; 1258 list_del(&last_range->wait_entry); 1259 if (!add_new_range(ic, last_range, false)) { 1260 last_range->task = last_range_task; 1261 list_add(&last_range->wait_entry, &ic->wait_list); 1262 break; 1263 } 1264 last_range->waiting = false; 1265 wake_up_process(last_range_task); 1266 } 1267 } 1268 1269 static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range) 1270 { 1271 unsigned long flags; 1272 1273 spin_lock_irqsave(&ic->endio_wait.lock, flags); 1274 remove_range_unlocked(ic, range); 1275 spin_unlock_irqrestore(&ic->endio_wait.lock, flags); 1276 } 1277 1278 static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range) 1279 { 1280 new_range->waiting = true; 1281 list_add_tail(&new_range->wait_entry, &ic->wait_list); 1282 new_range->task = current; 1283 do { 1284 __set_current_state(TASK_UNINTERRUPTIBLE); 1285 spin_unlock_irq(&ic->endio_wait.lock); 1286 io_schedule(); 1287 spin_lock_irq(&ic->endio_wait.lock); 1288 } while (unlikely(new_range->waiting)); 1289 } 1290 1291 static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range) 1292 { 1293 if (unlikely(!add_new_range(ic, new_range, true))) 1294 wait_and_add_new_range(ic, new_range); 1295 } 1296 1297 static void init_journal_node(struct journal_node *node) 1298 { 1299 RB_CLEAR_NODE(&node->node); 1300 node->sector = (sector_t)-1; 1301 } 1302 1303 static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector) 1304 { 1305 struct rb_node **link; 1306 struct rb_node *parent; 1307 1308 node->sector = sector; 1309 BUG_ON(!RB_EMPTY_NODE(&node->node)); 1310 1311 link = &ic->journal_tree_root.rb_node; 1312 parent = NULL; 1313 1314 while (*link) { 1315 struct journal_node *j; 1316 1317 parent = *link; 1318 j = container_of(parent, struct journal_node, node); 1319 if (sector < j->sector) 1320 link = &j->node.rb_left; 1321 else 1322 link = &j->node.rb_right; 1323 } 1324 1325 rb_link_node(&node->node, parent, link); 1326 rb_insert_color(&node->node, &ic->journal_tree_root); 1327 } 1328 1329 static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node) 1330 { 1331 BUG_ON(RB_EMPTY_NODE(&node->node)); 1332 rb_erase(&node->node, &ic->journal_tree_root); 1333 init_journal_node(node); 1334 } 1335 1336 #define NOT_FOUND (-1U) 1337 1338 static unsigned int find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector) 1339 { 1340 struct rb_node *n = ic->journal_tree_root.rb_node; 1341 unsigned int found = NOT_FOUND; 1342 1343 *next_sector = (sector_t)-1; 1344 while (n) { 1345 struct journal_node *j = container_of(n, struct journal_node, node); 1346 1347 if (sector == j->sector) 1348 found = j - ic->journal_tree; 1349 1350 if (sector < j->sector) { 1351 *next_sector = j->sector; 1352 n = j->node.rb_left; 1353 } else 1354 n = j->node.rb_right; 1355 } 1356 1357 return found; 1358 } 1359 1360 static bool test_journal_node(struct dm_integrity_c *ic, unsigned int pos, sector_t sector) 1361 { 1362 struct journal_node *node, *next_node; 1363 struct rb_node *next; 1364 1365 if (unlikely(pos 
>= ic->journal_entries)) 1366 return false; 1367 node = &ic->journal_tree[pos]; 1368 if (unlikely(RB_EMPTY_NODE(&node->node))) 1369 return false; 1370 if (unlikely(node->sector != sector)) 1371 return false; 1372 1373 next = rb_next(&node->node); 1374 if (unlikely(!next)) 1375 return true; 1376 1377 next_node = container_of(next, struct journal_node, node); 1378 return next_node->sector != sector; 1379 } 1380 1381 static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node) 1382 { 1383 struct rb_node *next; 1384 struct journal_node *next_node; 1385 unsigned int next_section; 1386 1387 BUG_ON(RB_EMPTY_NODE(&node->node)); 1388 1389 next = rb_next(&node->node); 1390 if (unlikely(!next)) 1391 return false; 1392 1393 next_node = container_of(next, struct journal_node, node); 1394 1395 if (next_node->sector != node->sector) 1396 return false; 1397 1398 next_section = (unsigned int)(next_node - ic->journal_tree) / ic->journal_section_entries; 1399 if (next_section >= ic->committed_section && 1400 next_section < ic->committed_section + ic->n_committed_sections) 1401 return true; 1402 if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections) 1403 return true; 1404 1405 return false; 1406 } 1407 1408 #define TAG_READ 0 1409 #define TAG_WRITE 1 1410 #define TAG_CMP 2 1411 1412 static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block, 1413 unsigned int *metadata_offset, unsigned int total_size, int op) 1414 { 1415 unsigned int hash_offset = 0; 1416 unsigned char mismatch_hash = 0; 1417 unsigned char mismatch_filler = !ic->discard; 1418 1419 do { 1420 unsigned char *data, *dp; 1421 struct dm_buffer *b; 1422 unsigned int to_copy; 1423 int r; 1424 1425 r = dm_integrity_failed(ic); 1426 if (unlikely(r)) 1427 return r; 1428 1429 data = dm_bufio_read(ic->bufio, *metadata_block, &b); 1430 if (IS_ERR(data)) 1431 return PTR_ERR(data); 1432 1433 to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size); 1434 dp = data + *metadata_offset; 1435 if (op == TAG_READ) { 1436 memcpy(tag, dp, to_copy); 1437 } else if (op == TAG_WRITE) { 1438 if (crypto_memneq(dp, tag, to_copy)) { 1439 memcpy(dp, tag, to_copy); 1440 dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy); 1441 } 1442 } else { 1443 /* e.g.: op == TAG_CMP */ 1444 1445 if (likely(is_power_of_2(ic->tag_size))) { 1446 if (unlikely(crypto_memneq(dp, tag, to_copy))) 1447 goto thorough_test; 1448 } else { 1449 unsigned int i, ts; 1450 thorough_test: 1451 ts = total_size; 1452 1453 for (i = 0; i < to_copy; i++, ts--) { 1454 /* 1455 * Warning: the control flow must not be 1456 * dependent on match/mismatch of 1457 * individual bytes. 
1458 */ 1459 mismatch_hash |= dp[i] ^ tag[i]; 1460 mismatch_filler |= dp[i] ^ DISCARD_FILLER; 1461 hash_offset++; 1462 if (unlikely(hash_offset == ic->tag_size)) { 1463 if (unlikely(mismatch_hash) && unlikely(mismatch_filler)) { 1464 dm_bufio_release(b); 1465 return ts; 1466 } 1467 hash_offset = 0; 1468 mismatch_hash = 0; 1469 mismatch_filler = !ic->discard; 1470 } 1471 } 1472 } 1473 } 1474 dm_bufio_release(b); 1475 1476 tag += to_copy; 1477 *metadata_offset += to_copy; 1478 if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) { 1479 (*metadata_block)++; 1480 *metadata_offset = 0; 1481 } 1482 1483 if (unlikely(!is_power_of_2(ic->tag_size))) 1484 hash_offset = (hash_offset + to_copy) % ic->tag_size; 1485 1486 total_size -= to_copy; 1487 } while (unlikely(total_size)); 1488 1489 return 0; 1490 } 1491 1492 struct flush_request { 1493 struct dm_io_request io_req; 1494 struct dm_io_region io_reg; 1495 struct dm_integrity_c *ic; 1496 struct completion comp; 1497 }; 1498 1499 static void flush_notify(unsigned long error, void *fr_) 1500 { 1501 struct flush_request *fr = fr_; 1502 1503 if (unlikely(error != 0)) 1504 dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO); 1505 complete(&fr->comp); 1506 } 1507 1508 static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data) 1509 { 1510 int r; 1511 struct flush_request fr; 1512 1513 if (!ic->meta_dev) 1514 flush_data = false; 1515 if (flush_data) { 1516 fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1517 fr.io_req.mem.type = DM_IO_KMEM; 1518 fr.io_req.mem.ptr.addr = NULL; 1519 fr.io_req.notify.fn = flush_notify; 1520 fr.io_req.notify.context = &fr; 1521 fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio); 1522 fr.io_reg.bdev = ic->dev->bdev; 1523 fr.io_reg.sector = 0; 1524 fr.io_reg.count = 0; 1525 fr.ic = ic; 1526 init_completion(&fr.comp); 1527 r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL, IOPRIO_DEFAULT); 1528 BUG_ON(r); 1529 } 1530 1531 r = dm_bufio_write_dirty_buffers(ic->bufio); 1532 if (unlikely(r)) 1533 dm_integrity_io_error(ic, "writing tags", r); 1534 1535 if (flush_data) 1536 wait_for_completion(&fr.comp); 1537 } 1538 1539 static void sleep_on_endio_wait(struct dm_integrity_c *ic) 1540 { 1541 DECLARE_WAITQUEUE(wait, current); 1542 1543 __add_wait_queue(&ic->endio_wait, &wait); 1544 __set_current_state(TASK_UNINTERRUPTIBLE); 1545 spin_unlock_irq(&ic->endio_wait.lock); 1546 io_schedule(); 1547 spin_lock_irq(&ic->endio_wait.lock); 1548 __remove_wait_queue(&ic->endio_wait, &wait); 1549 } 1550 1551 static void autocommit_fn(struct timer_list *t) 1552 { 1553 struct dm_integrity_c *ic = timer_container_of(ic, t, 1554 autocommit_timer); 1555 1556 if (likely(!dm_integrity_failed(ic))) 1557 queue_work(ic->commit_wq, &ic->commit_work); 1558 } 1559 1560 static void schedule_autocommit(struct dm_integrity_c *ic) 1561 { 1562 if (!timer_pending(&ic->autocommit_timer)) 1563 mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies); 1564 } 1565 1566 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) 1567 { 1568 struct bio *bio; 1569 unsigned long flags; 1570 1571 spin_lock_irqsave(&ic->endio_wait.lock, flags); 1572 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1573 bio_list_add(&ic->flush_bio_list, bio); 1574 spin_unlock_irqrestore(&ic->endio_wait.lock, flags); 1575 1576 queue_work(ic->commit_wq, &ic->commit_work); 1577 } 1578 1579 static void do_endio(struct dm_integrity_c *ic, struct bio *bio) 1580 { 1581 int r; 1582 1583 
r = dm_integrity_failed(ic); 1584 if (unlikely(r) && !bio->bi_status) 1585 bio->bi_status = errno_to_blk_status(r); 1586 if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) { 1587 unsigned long flags; 1588 1589 spin_lock_irqsave(&ic->endio_wait.lock, flags); 1590 bio_list_add(&ic->synchronous_bios, bio); 1591 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); 1592 spin_unlock_irqrestore(&ic->endio_wait.lock, flags); 1593 return; 1594 } 1595 bio_endio(bio); 1596 } 1597 1598 static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio) 1599 { 1600 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1601 1602 if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic))) 1603 submit_flush_bio(ic, dio); 1604 else 1605 do_endio(ic, bio); 1606 } 1607 1608 static void dec_in_flight(struct dm_integrity_io *dio) 1609 { 1610 if (atomic_dec_and_test(&dio->in_flight)) { 1611 struct dm_integrity_c *ic = dio->ic; 1612 struct bio *bio; 1613 1614 remove_range(ic, &dio->range); 1615 1616 if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD)) 1617 schedule_autocommit(ic); 1618 1619 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1620 if (unlikely(dio->bi_status) && !bio->bi_status) 1621 bio->bi_status = dio->bi_status; 1622 if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) { 1623 dio->range.logical_sector += dio->range.n_sectors; 1624 bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT); 1625 INIT_WORK(&dio->work, integrity_bio_wait); 1626 queue_work(ic->offload_wq, &dio->work); 1627 return; 1628 } 1629 do_endio_flush(ic, dio); 1630 } 1631 } 1632 1633 static void integrity_end_io(struct bio *bio) 1634 { 1635 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); 1636 1637 dm_bio_restore(&dio->bio_details, bio); 1638 if (bio->bi_integrity) 1639 bio->bi_opf |= REQ_INTEGRITY; 1640 1641 if (dio->completion) 1642 complete(dio->completion); 1643 1644 dec_in_flight(dio); 1645 } 1646 1647 static void integrity_sector_checksum_shash(struct dm_integrity_c *ic, sector_t sector, 1648 const char *data, unsigned offset, char *result) 1649 { 1650 __le64 sector_le = cpu_to_le64(sector); 1651 SHASH_DESC_ON_STACK(req, ic->internal_shash); 1652 int r; 1653 unsigned int digest_size; 1654 1655 req->tfm = ic->internal_shash; 1656 1657 r = crypto_shash_init(req); 1658 if (unlikely(r < 0)) { 1659 dm_integrity_io_error(ic, "crypto_shash_init", r); 1660 goto failed; 1661 } 1662 1663 if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { 1664 r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE); 1665 if (unlikely(r < 0)) { 1666 dm_integrity_io_error(ic, "crypto_shash_update", r); 1667 goto failed; 1668 } 1669 } 1670 1671 r = crypto_shash_update(req, (const __u8 *)§or_le, sizeof(sector_le)); 1672 if (unlikely(r < 0)) { 1673 dm_integrity_io_error(ic, "crypto_shash_update", r); 1674 goto failed; 1675 } 1676 1677 r = crypto_shash_update(req, data + offset, ic->sectors_per_block << SECTOR_SHIFT); 1678 if (unlikely(r < 0)) { 1679 dm_integrity_io_error(ic, "crypto_shash_update", r); 1680 goto failed; 1681 } 1682 1683 r = crypto_shash_final(req, result); 1684 if (unlikely(r < 0)) { 1685 dm_integrity_io_error(ic, "crypto_shash_final", r); 1686 goto failed; 1687 } 1688 1689 digest_size = ic->internal_hash_digestsize; 1690 if (unlikely(digest_size < ic->tag_size)) 1691 memset(result + digest_size, 0, ic->tag_size - digest_size); 1692 1693 
return; 1694 1695 failed: 1696 /* this shouldn't happen anyway, the hash functions have no reason to fail */ 1697 get_random_bytes(result, ic->tag_size); 1698 } 1699 1700 static void integrity_sector_checksum_ahash(struct dm_integrity_c *ic, struct ahash_request **ahash_req, 1701 sector_t sector, struct page *page, unsigned offset, char *result) 1702 { 1703 __le64 sector_le = cpu_to_le64(sector); 1704 struct ahash_request *req; 1705 DECLARE_CRYPTO_WAIT(wait); 1706 struct scatterlist sg[3], *s = sg; 1707 int r; 1708 unsigned int digest_size; 1709 unsigned int nbytes = 0; 1710 1711 might_sleep(); 1712 1713 req = *ahash_req; 1714 if (unlikely(!req)) { 1715 req = mempool_alloc(&ic->ahash_req_pool, GFP_NOIO); 1716 *ahash_req = req; 1717 } 1718 1719 ahash_request_set_tfm(req, ic->internal_ahash); 1720 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait); 1721 1722 if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { 1723 sg_init_table(sg, 3); 1724 sg_set_buf(s, (const __u8 *)&ic->sb->salt, SALT_SIZE); 1725 nbytes += SALT_SIZE; 1726 s++; 1727 } else { 1728 sg_init_table(sg, 2); 1729 } 1730 1731 if (likely(!is_vmalloc_addr(§or_le))) { 1732 sg_set_buf(s, §or_le, sizeof(sector_le)); 1733 } else { 1734 struct page *sec_page = vmalloc_to_page(§or_le); 1735 unsigned int sec_off = offset_in_page(§or_le); 1736 sg_set_page(s, sec_page, sizeof(sector_le), sec_off); 1737 } 1738 nbytes += sizeof(sector_le); 1739 s++; 1740 1741 sg_set_page(s, page, ic->sectors_per_block << SECTOR_SHIFT, offset); 1742 nbytes += ic->sectors_per_block << SECTOR_SHIFT; 1743 1744 ahash_request_set_crypt(req, sg, result, nbytes); 1745 1746 r = crypto_wait_req(crypto_ahash_digest(req), &wait); 1747 if (unlikely(r)) { 1748 dm_integrity_io_error(ic, "crypto_ahash_digest", r); 1749 goto failed; 1750 } 1751 1752 digest_size = ic->internal_hash_digestsize; 1753 if (unlikely(digest_size < ic->tag_size)) 1754 memset(result + digest_size, 0, ic->tag_size - digest_size); 1755 1756 return; 1757 1758 failed: 1759 /* this shouldn't happen anyway, the hash functions have no reason to fail */ 1760 get_random_bytes(result, ic->tag_size); 1761 } 1762 1763 static void integrity_sector_checksum(struct dm_integrity_c *ic, struct ahash_request **ahash_req, 1764 sector_t sector, const char *data, unsigned offset, char *result) 1765 { 1766 if (likely(ic->internal_shash != NULL)) 1767 integrity_sector_checksum_shash(ic, sector, data, offset, result); 1768 else 1769 integrity_sector_checksum_ahash(ic, ahash_req, sector, (struct page *)data, offset, result); 1770 } 1771 1772 static void *integrity_kmap(struct dm_integrity_c *ic, struct page *p) 1773 { 1774 if (likely(ic->internal_shash != NULL)) 1775 return kmap_local_page(p); 1776 else 1777 return p; 1778 } 1779 1780 static void integrity_kunmap(struct dm_integrity_c *ic, const void *ptr) 1781 { 1782 if (likely(ic->internal_shash != NULL)) 1783 kunmap_local(ptr); 1784 } 1785 1786 static void *integrity_identity(struct dm_integrity_c *ic, void *data) 1787 { 1788 #ifdef CONFIG_DEBUG_SG 1789 BUG_ON(offset_in_page(data)); 1790 BUG_ON(!virt_addr_valid(data)); 1791 #endif 1792 if (likely(ic->internal_shash != NULL)) 1793 return data; 1794 else 1795 return virt_to_page(data); 1796 } 1797 1798 static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum) 1799 { 1800 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1801 struct dm_integrity_c *ic = dio->ic; 1802 struct bvec_iter iter; 1803 struct bio_vec bv; 1804 sector_t sector, 
logical_sector, area, offset; 1805 struct page *page; 1806 1807 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); 1808 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, 1809 &dio->metadata_offset); 1810 sector = get_data_sector(ic, area, offset); 1811 logical_sector = dio->range.logical_sector; 1812 1813 page = mempool_alloc(&ic->recheck_pool, GFP_NOIO); 1814 1815 __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) { 1816 unsigned pos = 0; 1817 1818 do { 1819 sector_t alignment; 1820 char *mem; 1821 char *buffer = page_to_virt(page); 1822 unsigned int buffer_offset; 1823 int r; 1824 struct dm_io_request io_req; 1825 struct dm_io_region io_loc; 1826 io_req.bi_opf = REQ_OP_READ; 1827 io_req.mem.type = DM_IO_KMEM; 1828 io_req.mem.ptr.addr = buffer; 1829 io_req.notify.fn = NULL; 1830 io_req.client = ic->io; 1831 io_loc.bdev = ic->dev->bdev; 1832 io_loc.sector = sector; 1833 io_loc.count = ic->sectors_per_block; 1834 1835 /* Align the bio to logical block size */ 1836 alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT); 1837 alignment &= -alignment; 1838 io_loc.sector = round_down(io_loc.sector, alignment); 1839 io_loc.count += sector - io_loc.sector; 1840 buffer_offset = (sector - io_loc.sector) << SECTOR_SHIFT; 1841 io_loc.count = round_up(io_loc.count, alignment); 1842 1843 r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT); 1844 if (unlikely(r)) { 1845 dio->bi_status = errno_to_blk_status(r); 1846 goto free_ret; 1847 } 1848 1849 integrity_sector_checksum(ic, &dio->ahash_req, logical_sector, integrity_identity(ic, buffer), buffer_offset, checksum); 1850 r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block, 1851 &dio->metadata_offset, ic->tag_size, TAG_CMP); 1852 if (r) { 1853 if (r > 0) { 1854 DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx", 1855 bio->bi_bdev, logical_sector); 1856 atomic64_inc(&ic->number_of_mismatches); 1857 dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum", 1858 bio, logical_sector, 0); 1859 r = -EILSEQ; 1860 } 1861 dio->bi_status = errno_to_blk_status(r); 1862 goto free_ret; 1863 } 1864 1865 mem = bvec_kmap_local(&bv); 1866 memcpy(mem + pos, buffer + buffer_offset, ic->sectors_per_block << SECTOR_SHIFT); 1867 kunmap_local(mem); 1868 1869 pos += ic->sectors_per_block << SECTOR_SHIFT; 1870 sector += ic->sectors_per_block; 1871 logical_sector += ic->sectors_per_block; 1872 } while (pos < bv.bv_len); 1873 } 1874 free_ret: 1875 mempool_free(page, &ic->recheck_pool); 1876 } 1877 1878 static void integrity_metadata(struct work_struct *w) 1879 { 1880 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); 1881 struct dm_integrity_c *ic = dio->ic; 1882 1883 int r; 1884 1885 if (ic->internal_hash) { 1886 struct bvec_iter iter; 1887 struct bio_vec bv; 1888 unsigned int digest_size = ic->internal_hash_digestsize; 1889 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1890 char *checksums; 1891 unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? 
digest_size - ic->tag_size : 0; 1892 char checksums_onstack[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; 1893 sector_t sector; 1894 unsigned int sectors_to_process; 1895 1896 if (unlikely(ic->mode == 'R')) 1897 goto skip_io; 1898 1899 if (likely(dio->op != REQ_OP_DISCARD)) 1900 checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space, 1901 GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN); 1902 else 1903 checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN); 1904 if (!checksums) { 1905 checksums = checksums_onstack; 1906 if (WARN_ON(extra_space && 1907 digest_size > sizeof(checksums_onstack))) { 1908 r = -EINVAL; 1909 goto error; 1910 } 1911 } 1912 1913 if (unlikely(dio->op == REQ_OP_DISCARD)) { 1914 unsigned int bi_size = dio->bio_details.bi_iter.bi_size; 1915 unsigned int max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE; 1916 unsigned int max_blocks = max_size / ic->tag_size; 1917 1918 memset(checksums, DISCARD_FILLER, max_size); 1919 1920 while (bi_size) { 1921 unsigned int this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block); 1922 1923 this_step_blocks = min(this_step_blocks, max_blocks); 1924 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, 1925 this_step_blocks * ic->tag_size, TAG_WRITE); 1926 if (unlikely(r)) { 1927 if (likely(checksums != checksums_onstack)) 1928 kfree(checksums); 1929 goto error; 1930 } 1931 1932 bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block); 1933 } 1934 1935 if (likely(checksums != checksums_onstack)) 1936 kfree(checksums); 1937 goto skip_io; 1938 } 1939 1940 sector = dio->range.logical_sector; 1941 sectors_to_process = dio->range.n_sectors; 1942 1943 __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) { 1944 struct bio_vec bv_copy = bv; 1945 unsigned int pos; 1946 char *mem, *checksums_ptr; 1947 1948 again: 1949 mem = integrity_kmap(ic, bv_copy.bv_page); 1950 pos = 0; 1951 checksums_ptr = checksums; 1952 do { 1953 integrity_sector_checksum(ic, &dio->ahash_req, sector, mem, bv_copy.bv_offset + pos, checksums_ptr); 1954 checksums_ptr += ic->tag_size; 1955 sectors_to_process -= ic->sectors_per_block; 1956 pos += ic->sectors_per_block << SECTOR_SHIFT; 1957 sector += ic->sectors_per_block; 1958 } while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack); 1959 integrity_kunmap(ic, mem); 1960 1961 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, 1962 checksums_ptr - checksums, dio->op == REQ_OP_READ ? 
TAG_CMP : TAG_WRITE); 1963 if (unlikely(r)) { 1964 if (likely(checksums != checksums_onstack)) 1965 kfree(checksums); 1966 if (r > 0) { 1967 integrity_recheck(dio, checksums_onstack); 1968 goto skip_io; 1969 } 1970 goto error; 1971 } 1972 1973 if (!sectors_to_process) 1974 break; 1975 1976 if (unlikely(pos < bv_copy.bv_len)) { 1977 bv_copy.bv_offset += pos; 1978 bv_copy.bv_len -= pos; 1979 goto again; 1980 } 1981 } 1982 1983 if (likely(checksums != checksums_onstack)) 1984 kfree(checksums); 1985 } else { 1986 struct bio_integrity_payload *bip = dio->bio_details.bi_integrity; 1987 1988 if (bip) { 1989 struct bio_vec biv; 1990 struct bvec_iter iter; 1991 unsigned int data_to_process = dio->range.n_sectors; 1992 1993 sector_to_block(ic, data_to_process); 1994 data_to_process *= ic->tag_size; 1995 1996 bip_for_each_vec(biv, bip, iter) { 1997 unsigned char *tag; 1998 unsigned int this_len; 1999 2000 BUG_ON(PageHighMem(biv.bv_page)); 2001 tag = bvec_virt(&biv); 2002 this_len = min(biv.bv_len, data_to_process); 2003 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset, 2004 this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE); 2005 if (unlikely(r)) 2006 goto error; 2007 data_to_process -= this_len; 2008 if (!data_to_process) 2009 break; 2010 } 2011 } 2012 } 2013 skip_io: 2014 dec_in_flight(dio); 2015 return; 2016 error: 2017 dio->bi_status = errno_to_blk_status(r); 2018 dec_in_flight(dio); 2019 } 2020 2021 static inline bool dm_integrity_check_limits(struct dm_integrity_c *ic, sector_t logical_sector, struct bio *bio) 2022 { 2023 if (unlikely(logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) { 2024 DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx", 2025 logical_sector, bio_sectors(bio), 2026 ic->provided_data_sectors); 2027 return false; 2028 } 2029 if (unlikely((logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) { 2030 DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x", 2031 ic->sectors_per_block, 2032 logical_sector, bio_sectors(bio)); 2033 return false; 2034 } 2035 if (ic->sectors_per_block > 1 && likely(bio_op(bio) != REQ_OP_DISCARD)) { 2036 struct bvec_iter iter; 2037 struct bio_vec bv; 2038 2039 bio_for_each_segment(bv, bio, iter) { 2040 if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) { 2041 DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary", 2042 bv.bv_offset, bv.bv_len, ic->sectors_per_block); 2043 return false; 2044 } 2045 } 2046 } 2047 return true; 2048 } 2049 2050 static int dm_integrity_map(struct dm_target *ti, struct bio *bio) 2051 { 2052 struct dm_integrity_c *ic = ti->private; 2053 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); 2054 struct bio_integrity_payload *bip; 2055 2056 sector_t area, offset; 2057 2058 dio->ic = ic; 2059 dio->bi_status = 0; 2060 dio->op = bio_op(bio); 2061 dio->ahash_req = NULL; 2062 2063 if (ic->mode == 'I') { 2064 bio->bi_iter.bi_sector = dm_target_offset(ic->ti, bio->bi_iter.bi_sector); 2065 dio->integrity_payload = NULL; 2066 dio->integrity_payload_from_mempool = false; 2067 dio->integrity_range_locked = false; 2068 return dm_integrity_map_inline(dio, true); 2069 } 2070 2071 if (unlikely(dio->op == REQ_OP_DISCARD)) { 2072 if (ti->max_io_len) { 2073 sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector); 2074 unsigned int log2_max_io_len = __fls(ti->max_io_len); 2075 sector_t start_boundary = sec >> log2_max_io_len; 2076 sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len; 2077 
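/* dm_accept_partial_bio() trims the discard at the max_io_len boundary; device-mapper core resubmits the remainder as a new bio. */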
2078 if (start_boundary < end_boundary) { 2079 sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1)); 2080 2081 dm_accept_partial_bio(bio, len); 2082 } 2083 } 2084 } 2085 2086 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { 2087 submit_flush_bio(ic, dio); 2088 return DM_MAPIO_SUBMITTED; 2089 } 2090 2091 dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); 2092 dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA; 2093 if (unlikely(dio->fua)) { 2094 /* 2095 * Don't pass down the FUA flag because we have to flush 2096 * disk cache anyway. 2097 */ 2098 bio->bi_opf &= ~REQ_FUA; 2099 } 2100 if (unlikely(!dm_integrity_check_limits(ic, dio->range.logical_sector, bio))) 2101 return DM_MAPIO_KILL; 2102 2103 bip = bio_integrity(bio); 2104 if (!ic->internal_hash) { 2105 if (bip) { 2106 unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block; 2107 2108 if (ic->log2_tag_size >= 0) 2109 wanted_tag_size <<= ic->log2_tag_size; 2110 else 2111 wanted_tag_size *= ic->tag_size; 2112 if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) { 2113 DMERR("Invalid integrity data size %u, expected %u", 2114 bip->bip_iter.bi_size, wanted_tag_size); 2115 return DM_MAPIO_KILL; 2116 } 2117 } 2118 } else { 2119 if (unlikely(bip != NULL)) { 2120 DMERR("Unexpected integrity data when using internal hash"); 2121 return DM_MAPIO_KILL; 2122 } 2123 } 2124 2125 if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ)) 2126 return DM_MAPIO_KILL; 2127 2128 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); 2129 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); 2130 bio->bi_iter.bi_sector = get_data_sector(ic, area, offset); 2131 2132 dm_integrity_map_continue(dio, true); 2133 return DM_MAPIO_SUBMITTED; 2134 } 2135 2136 static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio, 2137 unsigned int journal_section, unsigned int journal_entry) 2138 { 2139 struct dm_integrity_c *ic = dio->ic; 2140 sector_t logical_sector; 2141 unsigned int n_sectors; 2142 2143 logical_sector = dio->range.logical_sector; 2144 n_sectors = dio->range.n_sectors; 2145 do { 2146 struct bio_vec bv = bio_iovec(bio); 2147 char *mem; 2148 2149 if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors)) 2150 bv.bv_len = n_sectors << SECTOR_SHIFT; 2151 n_sectors -= bv.bv_len >> SECTOR_SHIFT; 2152 bio_advance_iter(bio, &bio->bi_iter, bv.bv_len); 2153 retry_kmap: 2154 mem = kmap_local_page(bv.bv_page); 2155 if (likely(dio->op == REQ_OP_WRITE)) 2156 flush_dcache_page(bv.bv_page); 2157 2158 do { 2159 struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry); 2160 2161 if (unlikely(dio->op == REQ_OP_READ)) { 2162 struct journal_sector *js; 2163 char *mem_ptr; 2164 unsigned int s; 2165 2166 if (unlikely(journal_entry_is_inprogress(je))) { 2167 flush_dcache_page(bv.bv_page); 2168 kunmap_local(mem); 2169 2170 __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); 2171 goto retry_kmap; 2172 } 2173 smp_rmb(); 2174 BUG_ON(journal_entry_get_sector(je) != logical_sector); 2175 js = access_journal_data(ic, journal_section, journal_entry); 2176 mem_ptr = mem + bv.bv_offset; 2177 s = 0; 2178 do { 2179 memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA); 2180 *(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s]; 2181 js++; 2182 mem_ptr += 1 << SECTOR_SHIFT; 2183 } while (++s < ic->sectors_per_block); 2184 } 2185 2186 if (!ic->internal_hash) { 2187 struct bio_integrity_payload *bip = 
bio_integrity(bio); 2188 unsigned int tag_todo = ic->tag_size; 2189 char *tag_ptr = journal_entry_tag(ic, je); 2190 2191 if (bip) { 2192 do { 2193 struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter); 2194 unsigned int tag_now = min(biv.bv_len, tag_todo); 2195 char *tag_addr; 2196 2197 BUG_ON(PageHighMem(biv.bv_page)); 2198 tag_addr = bvec_virt(&biv); 2199 if (likely(dio->op == REQ_OP_WRITE)) 2200 memcpy(tag_ptr, tag_addr, tag_now); 2201 else 2202 memcpy(tag_addr, tag_ptr, tag_now); 2203 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now); 2204 tag_ptr += tag_now; 2205 tag_todo -= tag_now; 2206 } while (unlikely(tag_todo)); 2207 } else if (likely(dio->op == REQ_OP_WRITE)) 2208 memset(tag_ptr, 0, tag_todo); 2209 } 2210 2211 if (likely(dio->op == REQ_OP_WRITE)) { 2212 struct journal_sector *js; 2213 unsigned int s; 2214 2215 js = access_journal_data(ic, journal_section, journal_entry); 2216 memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT); 2217 2218 s = 0; 2219 do { 2220 je->last_bytes[s] = js[s].commit_id; 2221 } while (++s < ic->sectors_per_block); 2222 2223 if (ic->internal_hash) { 2224 unsigned int digest_size = ic->internal_hash_digestsize; 2225 void *js_page = integrity_identity(ic, (char *)js - offset_in_page(js)); 2226 unsigned js_offset = offset_in_page(js); 2227 2228 if (unlikely(digest_size > ic->tag_size)) { 2229 char checksums_onstack[HASH_MAX_DIGESTSIZE]; 2230 2231 integrity_sector_checksum(ic, &dio->ahash_req, logical_sector, js_page, js_offset, checksums_onstack); 2232 memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size); 2233 } else 2234 integrity_sector_checksum(ic, &dio->ahash_req, logical_sector, js_page, js_offset, journal_entry_tag(ic, je)); 2235 } 2236 2237 journal_entry_set_sector(je, logical_sector); 2238 } 2239 logical_sector += ic->sectors_per_block; 2240 2241 journal_entry++; 2242 if (unlikely(journal_entry == ic->journal_section_entries)) { 2243 journal_entry = 0; 2244 journal_section++; 2245 wraparound_section(ic, &journal_section); 2246 } 2247 2248 bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT; 2249 } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT); 2250 2251 if (unlikely(dio->op == REQ_OP_READ)) 2252 flush_dcache_page(bv.bv_page); 2253 kunmap_local(mem); 2254 } while (n_sectors); 2255 2256 if (likely(dio->op == REQ_OP_WRITE)) { 2257 smp_mb(); 2258 if (unlikely(waitqueue_active(&ic->copy_to_journal_wait))) 2259 wake_up(&ic->copy_to_journal_wait); 2260 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) 2261 queue_work(ic->commit_wq, &ic->commit_work); 2262 else 2263 schedule_autocommit(ic); 2264 } else 2265 remove_range(ic, &dio->range); 2266 2267 if (unlikely(bio->bi_iter.bi_size)) { 2268 sector_t area, offset; 2269 2270 dio->range.logical_sector = logical_sector; 2271 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); 2272 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); 2273 return true; 2274 } 2275 2276 return false; 2277 } 2278 2279 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map) 2280 { 2281 struct dm_integrity_c *ic = dio->ic; 2282 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 2283 unsigned int journal_section, journal_entry; 2284 unsigned int journal_read_pos; 2285 sector_t recalc_sector; 2286 struct completion read_comp; 2287 bool discard_retried = false; 2288 bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ; 2289 2290 if 
(unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D') 2291 need_sync_io = true; 2292 2293 if (need_sync_io && from_map) { 2294 INIT_WORK(&dio->work, integrity_bio_wait); 2295 queue_work(ic->offload_wq, &dio->work); 2296 return; 2297 } 2298 2299 lock_retry: 2300 spin_lock_irq(&ic->endio_wait.lock); 2301 retry: 2302 if (unlikely(dm_integrity_failed(ic))) { 2303 spin_unlock_irq(&ic->endio_wait.lock); 2304 do_endio(ic, bio); 2305 return; 2306 } 2307 dio->range.n_sectors = bio_sectors(bio); 2308 journal_read_pos = NOT_FOUND; 2309 if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) { 2310 if (dio->op == REQ_OP_WRITE) { 2311 unsigned int next_entry, i, pos; 2312 unsigned int ws, we, range_sectors; 2313 2314 dio->range.n_sectors = min(dio->range.n_sectors, 2315 (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block); 2316 if (unlikely(!dio->range.n_sectors)) { 2317 if (from_map) 2318 goto offload_to_thread; 2319 sleep_on_endio_wait(ic); 2320 goto retry; 2321 } 2322 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block; 2323 ic->free_sectors -= range_sectors; 2324 journal_section = ic->free_section; 2325 journal_entry = ic->free_section_entry; 2326 2327 next_entry = ic->free_section_entry + range_sectors; 2328 ic->free_section_entry = next_entry % ic->journal_section_entries; 2329 ic->free_section += next_entry / ic->journal_section_entries; 2330 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries; 2331 wraparound_section(ic, &ic->free_section); 2332 2333 pos = journal_section * ic->journal_section_entries + journal_entry; 2334 ws = journal_section; 2335 we = journal_entry; 2336 i = 0; 2337 do { 2338 struct journal_entry *je; 2339 2340 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i); 2341 pos++; 2342 if (unlikely(pos >= ic->journal_entries)) 2343 pos = 0; 2344 2345 je = access_journal_entry(ic, ws, we); 2346 BUG_ON(!journal_entry_is_unused(je)); 2347 journal_entry_set_inprogress(je); 2348 we++; 2349 if (unlikely(we == ic->journal_section_entries)) { 2350 we = 0; 2351 ws++; 2352 wraparound_section(ic, &ws); 2353 } 2354 } while ((i += ic->sectors_per_block) < dio->range.n_sectors); 2355 2356 spin_unlock_irq(&ic->endio_wait.lock); 2357 goto journal_read_write; 2358 } else { 2359 sector_t next_sector; 2360 2361 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); 2362 if (likely(journal_read_pos == NOT_FOUND)) { 2363 if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector)) 2364 dio->range.n_sectors = next_sector - dio->range.logical_sector; 2365 } else { 2366 unsigned int i; 2367 unsigned int jp = journal_read_pos + 1; 2368 2369 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) { 2370 if (!test_journal_node(ic, jp, dio->range.logical_sector + i)) 2371 break; 2372 } 2373 dio->range.n_sectors = i; 2374 } 2375 } 2376 } 2377 if (unlikely(!add_new_range(ic, &dio->range, true))) { 2378 /* 2379 * We must not sleep in the request routine because it could 2380 * stall bios on current->bio_list. 2381 * So, we offload the bio to a workqueue if we have to sleep. 
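 * The offloaded work runs in process context, where wait_and_add_new_range() may block safely.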
2382 */ 2383 if (from_map) { 2384 offload_to_thread: 2385 spin_unlock_irq(&ic->endio_wait.lock); 2386 INIT_WORK(&dio->work, integrity_bio_wait); 2387 queue_work(ic->wait_wq, &dio->work); 2388 return; 2389 } 2390 if (journal_read_pos != NOT_FOUND) 2391 dio->range.n_sectors = ic->sectors_per_block; 2392 wait_and_add_new_range(ic, &dio->range); 2393 /* 2394 * wait_and_add_new_range drops the spinlock, so the journal 2395 * may have been changed arbitrarily. We need to recheck. 2396 * To simplify the code, we restrict I/O size to just one block. 2397 */ 2398 if (journal_read_pos != NOT_FOUND) { 2399 sector_t next_sector; 2400 unsigned int new_pos; 2401 2402 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); 2403 if (unlikely(new_pos != journal_read_pos)) { 2404 remove_range_unlocked(ic, &dio->range); 2405 goto retry; 2406 } 2407 } 2408 } 2409 if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) { 2410 sector_t next_sector; 2411 unsigned int new_pos; 2412 2413 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); 2414 if (unlikely(new_pos != NOT_FOUND) || 2415 unlikely(next_sector < dio->range.logical_sector + dio->range.n_sectors)) { 2416 remove_range_unlocked(ic, &dio->range); 2417 spin_unlock_irq(&ic->endio_wait.lock); 2418 queue_work(ic->commit_wq, &ic->commit_work); 2419 flush_workqueue(ic->commit_wq); 2420 queue_work(ic->writer_wq, &ic->writer_work); 2421 flush_workqueue(ic->writer_wq); 2422 discard_retried = true; 2423 goto lock_retry; 2424 } 2425 } 2426 recalc_sector = le64_to_cpu(ic->sb->recalc_sector); 2427 spin_unlock_irq(&ic->endio_wait.lock); 2428 2429 if (unlikely(journal_read_pos != NOT_FOUND)) { 2430 journal_section = journal_read_pos / ic->journal_section_entries; 2431 journal_entry = journal_read_pos % ic->journal_section_entries; 2432 goto journal_read_write; 2433 } 2434 2435 if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) { 2436 if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, 2437 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) { 2438 struct bitmap_block_status *bbs; 2439 2440 bbs = sector_to_bitmap_block(ic, dio->range.logical_sector); 2441 spin_lock(&bbs->bio_queue_lock); 2442 bio_list_add(&bbs->bio_queue, bio); 2443 spin_unlock(&bbs->bio_queue_lock); 2444 queue_work(ic->writer_wq, &bbs->work); 2445 return; 2446 } 2447 } 2448 2449 dio->in_flight = (atomic_t)ATOMIC_INIT(2); 2450 2451 if (need_sync_io) { 2452 init_completion(&read_comp); 2453 dio->completion = &read_comp; 2454 } else 2455 dio->completion = NULL; 2456 2457 dm_bio_record(&dio->bio_details, bio); 2458 bio_set_dev(bio, ic->dev->bdev); 2459 bio->bi_integrity = NULL; 2460 bio->bi_opf &= ~REQ_INTEGRITY; 2461 bio->bi_end_io = integrity_end_io; 2462 bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT; 2463 2464 if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) { 2465 integrity_metadata(&dio->work); 2466 dm_integrity_flush_buffers(ic, false); 2467 2468 dio->in_flight = (atomic_t)ATOMIC_INIT(1); 2469 dio->completion = NULL; 2470 2471 submit_bio_noacct(bio); 2472 2473 return; 2474 } 2475 2476 submit_bio_noacct(bio); 2477 2478 if (need_sync_io) { 2479 wait_for_completion_io(&read_comp); 2480 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && 2481 dio->range.logical_sector + dio->range.n_sectors > recalc_sector) 2482 goto skip_check; 2483 if (ic->mode == 'B') { 2484 if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector, 2485 
dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) 2486 goto skip_check; 2487 } 2488 2489 if (likely(!bio->bi_status)) 2490 integrity_metadata(&dio->work); 2491 else 2492 skip_check: 2493 dec_in_flight(dio); 2494 } else { 2495 INIT_WORK(&dio->work, integrity_metadata); 2496 queue_work(ic->metadata_wq, &dio->work); 2497 } 2498 2499 return; 2500 2501 journal_read_write: 2502 if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry))) 2503 goto lock_retry; 2504 2505 do_endio_flush(ic, dio); 2506 } 2507 2508 static int dm_integrity_map_inline(struct dm_integrity_io *dio, bool from_map) 2509 { 2510 struct dm_integrity_c *ic = dio->ic; 2511 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 2512 struct bio_integrity_payload *bip; 2513 unsigned ret; 2514 sector_t recalc_sector; 2515 2516 if (unlikely(bio_integrity(bio))) { 2517 bio->bi_status = BLK_STS_NOTSUPP; 2518 bio_endio(bio); 2519 return DM_MAPIO_SUBMITTED; 2520 } 2521 2522 bio_set_dev(bio, ic->dev->bdev); 2523 if (unlikely((bio->bi_opf & REQ_PREFLUSH) != 0)) 2524 return DM_MAPIO_REMAPPED; 2525 2526 retry: 2527 if (!dio->integrity_payload) { 2528 unsigned digest_size, extra_size; 2529 dio->payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block); 2530 digest_size = ic->internal_hash_digestsize; 2531 extra_size = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0; 2532 dio->payload_len += extra_size; 2533 dio->integrity_payload = kmalloc(dio->payload_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); 2534 if (unlikely(!dio->integrity_payload)) { 2535 const unsigned x_size = PAGE_SIZE << 1; 2536 if (dio->payload_len > x_size) { 2537 unsigned sectors = ((x_size - extra_size) / ic->tuple_size) << ic->sb->log2_sectors_per_block; 2538 if (WARN_ON(!sectors || sectors >= bio_sectors(bio))) { 2539 bio->bi_status = BLK_STS_NOTSUPP; 2540 bio_endio(bio); 2541 return DM_MAPIO_SUBMITTED; 2542 } 2543 dm_accept_partial_bio(bio, sectors); 2544 goto retry; 2545 } 2546 } 2547 } 2548 2549 dio->range.logical_sector = bio->bi_iter.bi_sector; 2550 dio->range.n_sectors = bio_sectors(bio); 2551 2552 if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) 2553 goto skip_spinlock; 2554 #ifdef CONFIG_64BIT 2555 /* 2556 * On 64-bit CPUs we can optimize the lock away (so that it won't cause 2557 * cache line bouncing) and use acquire/release barriers instead. 2558 * 2559 * Paired with smp_store_release in integrity_recalc_inline. 
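 * If the whole bio already lies below the recalculated watermark, the spinlock is skipped entirely.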
2560 */ 2561 recalc_sector = le64_to_cpu(smp_load_acquire(&ic->sb->recalc_sector)); 2562 if (likely(dio->range.logical_sector + dio->range.n_sectors <= recalc_sector)) 2563 goto skip_spinlock; 2564 #endif 2565 spin_lock_irq(&ic->endio_wait.lock); 2566 recalc_sector = le64_to_cpu(ic->sb->recalc_sector); 2567 if (dio->range.logical_sector + dio->range.n_sectors <= recalc_sector) 2568 goto skip_unlock; 2569 if (unlikely(!add_new_range(ic, &dio->range, true))) { 2570 if (from_map) { 2571 spin_unlock_irq(&ic->endio_wait.lock); 2572 INIT_WORK(&dio->work, integrity_bio_wait); 2573 queue_work(ic->wait_wq, &dio->work); 2574 return DM_MAPIO_SUBMITTED; 2575 } 2576 wait_and_add_new_range(ic, &dio->range); 2577 } 2578 dio->integrity_range_locked = true; 2579 skip_unlock: 2580 spin_unlock_irq(&ic->endio_wait.lock); 2581 skip_spinlock: 2582 2583 if (unlikely(!dio->integrity_payload)) { 2584 dio->integrity_payload = page_to_virt((struct page *)mempool_alloc(&ic->recheck_pool, GFP_NOIO)); 2585 dio->integrity_payload_from_mempool = true; 2586 } 2587 2588 dio->bio_details.bi_iter = bio->bi_iter; 2589 2590 if (unlikely(!dm_integrity_check_limits(ic, bio->bi_iter.bi_sector, bio))) { 2591 return DM_MAPIO_KILL; 2592 } 2593 2594 bio->bi_iter.bi_sector += ic->start + SB_SECTORS; 2595 2596 bip = bio_integrity_alloc(bio, GFP_NOIO, 1); 2597 if (IS_ERR(bip)) { 2598 bio->bi_status = errno_to_blk_status(PTR_ERR(bip)); 2599 bio_endio(bio); 2600 return DM_MAPIO_SUBMITTED; 2601 } 2602 2603 if (dio->op == REQ_OP_WRITE) { 2604 unsigned pos = 0; 2605 while (dio->bio_details.bi_iter.bi_size) { 2606 struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter); 2607 const char *mem = integrity_kmap(ic, bv.bv_page); 2608 if (ic->tag_size < ic->tuple_size) 2609 memset(dio->integrity_payload + pos + ic->tag_size, 0, ic->tuple_size - ic->tag_size); 2610 integrity_sector_checksum(ic, &dio->ahash_req, dio->bio_details.bi_iter.bi_sector, mem, bv.bv_offset, dio->integrity_payload + pos); 2611 integrity_kunmap(ic, mem); 2612 pos += ic->tuple_size; 2613 bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT); 2614 } 2615 } 2616 2617 ret = bio_integrity_add_page(bio, virt_to_page(dio->integrity_payload), 2618 dio->payload_len, offset_in_page(dio->integrity_payload)); 2619 if (unlikely(ret != dio->payload_len)) { 2620 bio->bi_status = BLK_STS_RESOURCE; 2621 bio_endio(bio); 2622 return DM_MAPIO_SUBMITTED; 2623 } 2624 2625 return DM_MAPIO_REMAPPED; 2626 } 2627 2628 static inline void dm_integrity_free_payload(struct dm_integrity_io *dio) 2629 { 2630 struct dm_integrity_c *ic = dio->ic; 2631 if (unlikely(dio->integrity_payload_from_mempool)) 2632 mempool_free(virt_to_page(dio->integrity_payload), &ic->recheck_pool); 2633 else 2634 kfree(dio->integrity_payload); 2635 dio->integrity_payload = NULL; 2636 dio->integrity_payload_from_mempool = false; 2637 } 2638 2639 static void dm_integrity_inline_recheck(struct work_struct *w) 2640 { 2641 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); 2642 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 2643 struct dm_integrity_c *ic = dio->ic; 2644 struct bio *outgoing_bio; 2645 void *outgoing_data; 2646 2647 dio->integrity_payload = page_to_virt((struct page *)mempool_alloc(&ic->recheck_pool, GFP_NOIO)); 2648 dio->integrity_payload_from_mempool = true; 2649 2650 outgoing_data = dio->integrity_payload + PAGE_SIZE; 2651 2652 while (dio->bio_details.bi_iter.bi_size) { 2653 char digest[HASH_MAX_DIGESTSIZE];
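/* Re-read each block synchronously together with its stored tag and re-verify the checksum before copying the data to the caller's bio. */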
2654 int r; 2655 struct bio_integrity_payload *bip; 2656 struct bio_vec bv; 2657 char *mem; 2658 2659 outgoing_bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recheck_bios); 2660 bio_add_virt_nofail(outgoing_bio, outgoing_data, 2661 ic->sectors_per_block << SECTOR_SHIFT); 2662 2663 bip = bio_integrity_alloc(outgoing_bio, GFP_NOIO, 1); 2664 if (IS_ERR(bip)) { 2665 bio_put(outgoing_bio); 2666 bio->bi_status = errno_to_blk_status(PTR_ERR(bip)); 2667 bio_endio(bio); 2668 return; 2669 } 2670 2671 r = bio_integrity_add_page(outgoing_bio, virt_to_page(dio->integrity_payload), ic->tuple_size, 0); 2672 if (unlikely(r != ic->tuple_size)) { 2673 bio_put(outgoing_bio); 2674 bio->bi_status = BLK_STS_RESOURCE; 2675 bio_endio(bio); 2676 return; 2677 } 2678 2679 outgoing_bio->bi_iter.bi_sector = dio->bio_details.bi_iter.bi_sector + ic->start + SB_SECTORS; 2680 2681 r = submit_bio_wait(outgoing_bio); 2682 if (unlikely(r != 0)) { 2683 bio_put(outgoing_bio); 2684 bio->bi_status = errno_to_blk_status(r); 2685 bio_endio(bio); 2686 return; 2687 } 2688 bio_put(outgoing_bio); 2689 2690 integrity_sector_checksum(ic, &dio->ahash_req, dio->bio_details.bi_iter.bi_sector, integrity_identity(ic, outgoing_data), 0, digest); 2691 if (unlikely(crypto_memneq(digest, dio->integrity_payload, min(ic->internal_hash_digestsize, ic->tag_size)))) { 2692 DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx", 2693 ic->dev->bdev, dio->bio_details.bi_iter.bi_sector); 2694 atomic64_inc(&ic->number_of_mismatches); 2695 dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum", 2696 bio, dio->bio_details.bi_iter.bi_sector, 0); 2697 2698 bio->bi_status = BLK_STS_PROTECTION; 2699 bio_endio(bio); 2700 return; 2701 } 2702 2703 bv = bio_iter_iovec(bio, dio->bio_details.bi_iter); 2704 mem = bvec_kmap_local(&bv); 2705 memcpy(mem, outgoing_data, ic->sectors_per_block << SECTOR_SHIFT); 2706 kunmap_local(mem); 2707 2708 bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT); 2709 } 2710 2711 bio_endio(bio); 2712 } 2713 2714 static inline bool dm_integrity_check(struct dm_integrity_c *ic, struct dm_integrity_io *dio) 2715 { 2716 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 2717 unsigned pos = 0; 2718 2719 while (dio->bio_details.bi_iter.bi_size) { 2720 char digest[HASH_MAX_DIGESTSIZE]; 2721 struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter); 2722 char *mem = integrity_kmap(ic, bv.bv_page); 2723 integrity_sector_checksum(ic, &dio->ahash_req, dio->bio_details.bi_iter.bi_sector, mem, bv.bv_offset, digest); 2724 if (unlikely(crypto_memneq(digest, dio->integrity_payload + pos, 2725 min(ic->internal_hash_digestsize, ic->tag_size)))) { 2726 integrity_kunmap(ic, mem); 2727 dm_integrity_free_payload(dio); 2728 INIT_WORK(&dio->work, dm_integrity_inline_recheck); 2729 queue_work(ic->offload_wq, &dio->work); 2730 return false; 2731 } 2732 integrity_kunmap(ic, mem); 2733 pos += ic->tuple_size; 2734 bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT); 2735 } 2736 2737 return true; 2738 } 2739 2740 static void dm_integrity_inline_async_check(struct work_struct *w) 2741 { 2742 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); 2743 struct dm_integrity_c *ic = dio->ic; 2744 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 2745 2746 if (likely(dm_integrity_check(ic, dio))) 2747 bio_endio(bio); 2748 } 2749 2750 static int dm_integrity_end_io(struct dm_target *ti, 
struct bio *bio, blk_status_t *status) 2751 { 2752 struct dm_integrity_c *ic = ti->private; 2753 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); 2754 if (ic->mode == 'I') { 2755 if (dio->op == REQ_OP_READ && likely(*status == BLK_STS_OK) && likely(dio->bio_details.bi_iter.bi_size != 0)) { 2756 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && 2757 unlikely(dio->integrity_range_locked)) 2758 goto skip_check; 2759 if (likely(ic->internal_shash != NULL)) { 2760 if (unlikely(!dm_integrity_check(ic, dio))) 2761 return DM_ENDIO_INCOMPLETE; 2762 } else { 2763 INIT_WORK(&dio->work, dm_integrity_inline_async_check); 2764 queue_work(ic->offload_wq, &dio->work); 2765 return DM_ENDIO_INCOMPLETE; 2766 } 2767 } 2768 skip_check: 2769 dm_integrity_free_payload(dio); 2770 if (unlikely(dio->integrity_range_locked)) 2771 remove_range(ic, &dio->range); 2772 } 2773 if (unlikely(dio->ahash_req)) 2774 mempool_free(dio->ahash_req, &ic->ahash_req_pool); 2775 return DM_ENDIO_DONE; 2776 } 2777 2778 static void integrity_bio_wait(struct work_struct *w) 2779 { 2780 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); 2781 struct dm_integrity_c *ic = dio->ic; 2782 2783 if (ic->mode == 'I') { 2784 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 2785 int r = dm_integrity_map_inline(dio, false); 2786 switch (r) { 2787 case DM_MAPIO_KILL: 2788 bio->bi_status = BLK_STS_IOERR; 2789 fallthrough; 2790 case DM_MAPIO_REMAPPED: 2791 submit_bio_noacct(bio); 2792 fallthrough; 2793 case DM_MAPIO_SUBMITTED: 2794 return; 2795 default: 2796 BUG(); 2797 } 2798 } else { 2799 dm_integrity_map_continue(dio, false); 2800 } 2801 } 2802 2803 static void pad_uncommitted(struct dm_integrity_c *ic) 2804 { 2805 if (ic->free_section_entry) { 2806 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry; 2807 ic->free_section_entry = 0; 2808 ic->free_section++; 2809 wraparound_section(ic, &ic->free_section); 2810 ic->n_uncommitted_sections++; 2811 } 2812 if (WARN_ON(ic->journal_sections * ic->journal_section_entries != 2813 (ic->n_uncommitted_sections + ic->n_committed_sections) * 2814 ic->journal_section_entries + ic->free_sectors)) { 2815 DMCRIT("journal_sections %u, journal_section_entries %u, " 2816 "n_uncommitted_sections %u, n_committed_sections %u, " 2817 "journal_section_entries %u, free_sectors %u", 2818 ic->journal_sections, ic->journal_section_entries, 2819 ic->n_uncommitted_sections, ic->n_committed_sections, 2820 ic->journal_section_entries, ic->free_sectors); 2821 } 2822 } 2823 2824 static void integrity_commit(struct work_struct *w) 2825 { 2826 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work); 2827 unsigned int commit_start, commit_sections; 2828 unsigned int i, j, n; 2829 struct bio *flushes; 2830 2831 timer_delete(&ic->autocommit_timer); 2832 2833 if (ic->mode == 'I') 2834 return; 2835 2836 spin_lock_irq(&ic->endio_wait.lock); 2837 flushes = bio_list_get(&ic->flush_bio_list); 2838 if (unlikely(ic->mode != 'J')) { 2839 spin_unlock_irq(&ic->endio_wait.lock); 2840 dm_integrity_flush_buffers(ic, true); 2841 goto release_flush_bios; 2842 } 2843 2844 pad_uncommitted(ic); 2845 commit_start = ic->uncommitted_section; 2846 commit_sections = ic->n_uncommitted_sections; 2847 spin_unlock_irq(&ic->endio_wait.lock); 2848 2849 if (!commit_sections) 2850 goto release_flush_bios; 2851 2852 ic->wrote_to_journal = true; 2853 2854 i = commit_start; 2855 for (n = 0; n < commit_sections; n++) { 2856 for (j = 
0; j < ic->journal_section_entries; j++) { 2857 struct journal_entry *je; 2858 2859 je = access_journal_entry(ic, i, j); 2860 io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); 2861 } 2862 for (j = 0; j < ic->journal_section_sectors; j++) { 2863 struct journal_sector *js; 2864 2865 js = access_journal(ic, i, j); 2866 js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq); 2867 } 2868 i++; 2869 if (unlikely(i >= ic->journal_sections)) 2870 ic->commit_seq = next_commit_seq(ic->commit_seq); 2871 wraparound_section(ic, &i); 2872 } 2873 smp_rmb(); 2874 2875 write_journal(ic, commit_start, commit_sections); 2876 2877 spin_lock_irq(&ic->endio_wait.lock); 2878 ic->uncommitted_section += commit_sections; 2879 wraparound_section(ic, &ic->uncommitted_section); 2880 ic->n_uncommitted_sections -= commit_sections; 2881 ic->n_committed_sections += commit_sections; 2882 spin_unlock_irq(&ic->endio_wait.lock); 2883 2884 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) 2885 queue_work(ic->writer_wq, &ic->writer_work); 2886 2887 release_flush_bios: 2888 while (flushes) { 2889 struct bio *next = flushes->bi_next; 2890 2891 flushes->bi_next = NULL; 2892 do_endio(ic, flushes); 2893 flushes = next; 2894 } 2895 } 2896 2897 static void complete_copy_from_journal(unsigned long error, void *context) 2898 { 2899 struct journal_io *io = context; 2900 struct journal_completion *comp = io->comp; 2901 struct dm_integrity_c *ic = comp->ic; 2902 2903 remove_range(ic, &io->range); 2904 mempool_free(io, &ic->journal_io_mempool); 2905 if (unlikely(error != 0)) 2906 dm_integrity_io_error(ic, "copying from journal", -EIO); 2907 complete_journal_op(comp); 2908 } 2909 2910 static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js, 2911 struct journal_entry *je) 2912 { 2913 unsigned int s = 0; 2914 2915 do { 2916 js->commit_id = je->last_bytes[s]; 2917 js++; 2918 } while (++s < ic->sectors_per_block); 2919 } 2920 2921 static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start, 2922 unsigned int write_sections, bool from_replay) 2923 { 2924 unsigned int i, j, n; 2925 struct journal_completion comp; 2926 struct blk_plug plug; 2927 2928 blk_start_plug(&plug); 2929 2930 comp.ic = ic; 2931 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2932 init_completion(&comp.comp); 2933 2934 i = write_start; 2935 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) { 2936 #ifndef INTERNAL_VERIFY 2937 if (unlikely(from_replay)) 2938 #endif 2939 rw_section_mac(ic, i, false); 2940 for (j = 0; j < ic->journal_section_entries; j++) { 2941 struct journal_entry *je = access_journal_entry(ic, i, j); 2942 sector_t sec, area, offset; 2943 unsigned int k, l, next_loop; 2944 sector_t metadata_block; 2945 unsigned int metadata_offset; 2946 struct journal_io *io; 2947 2948 if (journal_entry_is_unused(je)) 2949 continue; 2950 BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay); 2951 sec = journal_entry_get_sector(je); 2952 if (unlikely(from_replay)) { 2953 if (unlikely(sec & (unsigned int)(ic->sectors_per_block - 1))) { 2954 dm_integrity_io_error(ic, "invalid sector in journal", -EIO); 2955 sec &= ~(sector_t)(ic->sectors_per_block - 1); 2956 } 2957 if (unlikely(sec >= ic->provided_data_sectors)) { 2958 journal_entry_set_unused(je); 2959 continue; 2960 } 2961 } 2962 get_area_and_offset(ic, sec, &area, &offset); 2963 restore_last_bytes(ic, access_journal_data(ic, i, j), je); 2964 for (k = j + 1; k < ic->journal_section_entries; k++) { 2965 
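/* Merge journal entries that describe physically contiguous data blocks so they can be copied back in a single operation. */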
struct journal_entry *je2 = access_journal_entry(ic, i, k); 2966 sector_t sec2, area2, offset2; 2967 2968 if (journal_entry_is_unused(je2)) 2969 break; 2970 BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay); 2971 sec2 = journal_entry_get_sector(je2); 2972 if (unlikely(sec2 >= ic->provided_data_sectors)) 2973 break; 2974 get_area_and_offset(ic, sec2, &area2, &offset2); 2975 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block)) 2976 break; 2977 restore_last_bytes(ic, access_journal_data(ic, i, k), je2); 2978 } 2979 next_loop = k - 1; 2980 2981 io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO); 2982 io->comp = &comp; 2983 io->range.logical_sector = sec; 2984 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block; 2985 2986 spin_lock_irq(&ic->endio_wait.lock); 2987 add_new_range_and_wait(ic, &io->range); 2988 2989 if (likely(!from_replay)) { 2990 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries]; 2991 2992 /* don't write if there is newer committed sector */ 2993 while (j < k && find_newer_committed_node(ic, &section_node[j])) { 2994 struct journal_entry *je2 = access_journal_entry(ic, i, j); 2995 2996 journal_entry_set_unused(je2); 2997 remove_journal_node(ic, &section_node[j]); 2998 j++; 2999 sec += ic->sectors_per_block; 3000 offset += ic->sectors_per_block; 3001 } 3002 while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) { 3003 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1); 3004 3005 journal_entry_set_unused(je2); 3006 remove_journal_node(ic, &section_node[k - 1]); 3007 k--; 3008 } 3009 if (j == k) { 3010 remove_range_unlocked(ic, &io->range); 3011 spin_unlock_irq(&ic->endio_wait.lock); 3012 mempool_free(io, &ic->journal_io_mempool); 3013 goto skip_io; 3014 } 3015 for (l = j; l < k; l++) 3016 remove_journal_node(ic, &section_node[l]); 3017 } 3018 spin_unlock_irq(&ic->endio_wait.lock); 3019 3020 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset); 3021 for (l = j; l < k; l++) { 3022 int r; 3023 struct journal_entry *je2 = access_journal_entry(ic, i, l); 3024 3025 if ( 3026 #ifndef INTERNAL_VERIFY 3027 unlikely(from_replay) && 3028 #endif 3029 ic->internal_hash) { 3030 char test_tag[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; 3031 struct journal_sector *js = access_journal_data(ic, i, l); 3032 void *js_page = integrity_identity(ic, (char *)js - offset_in_page(js)); 3033 unsigned js_offset = offset_in_page(js); 3034 3035 integrity_sector_checksum(ic, &ic->journal_ahash_req, sec + ((l - j) << ic->sb->log2_sectors_per_block), 3036 js_page, js_offset, test_tag); 3037 if (unlikely(crypto_memneq(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) { 3038 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ); 3039 dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0); 3040 } 3041 } 3042 3043 journal_entry_set_unused(je2); 3044 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset, 3045 ic->tag_size, TAG_WRITE); 3046 if (unlikely(r)) 3047 dm_integrity_io_error(ic, "reading tags", r); 3048 } 3049 3050 atomic_inc(&comp.in_flight); 3051 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block, 3052 (k - j) << ic->sb->log2_sectors_per_block, 3053 get_data_sector(ic, area, offset), 3054 complete_copy_from_journal, io); 3055 skip_io: 3056 j = next_loop; 3057 } 3058 } 3059 3060 dm_bufio_write_dirty_buffers_async(ic->bufio); 3061 3062 blk_finish_plug(&plug); 3063 3064
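/* Drop the initial in_flight reference; the completion fires once all complete_copy_from_journal() callbacks have finished. */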
complete_journal_op(&comp); 3065 wait_for_completion_io(&comp.comp); 3066 3067 dm_integrity_flush_buffers(ic, true); 3068 } 3069 3070 static void integrity_writer(struct work_struct *w) 3071 { 3072 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work); 3073 unsigned int write_start, write_sections; 3074 unsigned int prev_free_sectors; 3075 3076 spin_lock_irq(&ic->endio_wait.lock); 3077 write_start = ic->committed_section; 3078 write_sections = ic->n_committed_sections; 3079 spin_unlock_irq(&ic->endio_wait.lock); 3080 3081 if (!write_sections) 3082 return; 3083 3084 do_journal_write(ic, write_start, write_sections, false); 3085 3086 spin_lock_irq(&ic->endio_wait.lock); 3087 3088 ic->committed_section += write_sections; 3089 wraparound_section(ic, &ic->committed_section); 3090 ic->n_committed_sections -= write_sections; 3091 3092 prev_free_sectors = ic->free_sectors; 3093 ic->free_sectors += write_sections * ic->journal_section_entries; 3094 if (unlikely(!prev_free_sectors)) 3095 wake_up_locked(&ic->endio_wait); 3096 3097 spin_unlock_irq(&ic->endio_wait.lock); 3098 } 3099 3100 static void recalc_write_super(struct dm_integrity_c *ic) 3101 { 3102 int r; 3103 3104 dm_integrity_flush_buffers(ic, false); 3105 if (dm_integrity_failed(ic)) 3106 return; 3107 3108 r = sync_rw_sb(ic, REQ_OP_WRITE); 3109 if (unlikely(r)) 3110 dm_integrity_io_error(ic, "writing superblock", r); 3111 } 3112 3113 static void integrity_recalc(struct work_struct *w) 3114 { 3115 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work); 3116 size_t recalc_tags_size; 3117 u8 *recalc_buffer = NULL; 3118 u8 *recalc_tags = NULL; 3119 struct ahash_request *ahash_req = NULL; 3120 struct dm_integrity_range range; 3121 struct dm_io_request io_req; 3122 struct dm_io_region io_loc; 3123 sector_t area, offset; 3124 sector_t metadata_block; 3125 unsigned int metadata_offset; 3126 sector_t logical_sector, n_sectors; 3127 __u8 *t; 3128 unsigned int i; 3129 int r; 3130 unsigned int super_counter = 0; 3131 unsigned recalc_sectors = RECALC_SECTORS; 3132 3133 retry: 3134 recalc_buffer = kmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO | __GFP_NOWARN); 3135 if (!recalc_buffer) { 3136 oom: 3137 recalc_sectors >>= 1; 3138 if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block) 3139 goto retry; 3140 DMCRIT("out of memory for recalculate buffer - recalculation disabled"); 3141 goto free_ret; 3142 } 3143 recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size; 3144 if (ic->internal_hash_digestsize > ic->tag_size) 3145 recalc_tags_size += ic->internal_hash_digestsize - ic->tag_size; 3146 recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO); 3147 if (!recalc_tags) { 3148 kfree(recalc_buffer); 3149 recalc_buffer = NULL; 3150 goto oom; 3151 } 3152 3153 DEBUG_print("start recalculation... 
(position %llx)\n", le64_to_cpu(ic->sb->recalc_sector)); 3154 3155 spin_lock_irq(&ic->endio_wait.lock); 3156 3157 next_chunk: 3158 3159 if (unlikely(dm_post_suspending(ic->ti))) 3160 goto unlock_ret; 3161 3162 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); 3163 if (unlikely(range.logical_sector >= ic->provided_data_sectors)) { 3164 if (ic->mode == 'B') { 3165 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); 3166 DEBUG_print("queue_delayed_work: bitmap_flush_work\n"); 3167 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); 3168 } 3169 goto unlock_ret; 3170 } 3171 3172 get_area_and_offset(ic, range.logical_sector, &area, &offset); 3173 range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector); 3174 if (!ic->meta_dev) 3175 range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned int)offset); 3176 3177 add_new_range_and_wait(ic, &range); 3178 spin_unlock_irq(&ic->endio_wait.lock); 3179 logical_sector = range.logical_sector; 3180 n_sectors = range.n_sectors; 3181 3182 if (ic->mode == 'B') { 3183 if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) 3184 goto advance_and_next; 3185 3186 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, 3187 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) { 3188 logical_sector += ic->sectors_per_block; 3189 n_sectors -= ic->sectors_per_block; 3190 cond_resched(); 3191 } 3192 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block, 3193 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) { 3194 n_sectors -= ic->sectors_per_block; 3195 cond_resched(); 3196 } 3197 get_area_and_offset(ic, logical_sector, &area, &offset); 3198 } 3199 3200 DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors); 3201 3202 if (unlikely(++super_counter == RECALC_WRITE_SUPER)) { 3203 recalc_write_super(ic); 3204 if (ic->mode == 'B') 3205 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval); 3206 3207 super_counter = 0; 3208 } 3209 3210 if (unlikely(dm_integrity_failed(ic))) 3211 goto err; 3212 3213 io_req.bi_opf = REQ_OP_READ; 3214 io_req.mem.type = DM_IO_KMEM; 3215 io_req.mem.ptr.addr = recalc_buffer; 3216 io_req.notify.fn = NULL; 3217 io_req.client = ic->io; 3218 io_loc.bdev = ic->dev->bdev; 3219 io_loc.sector = get_data_sector(ic, area, offset); 3220 io_loc.count = n_sectors; 3221 3222 r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT); 3223 if (unlikely(r)) { 3224 dm_integrity_io_error(ic, "reading data", r); 3225 goto err; 3226 } 3227 3228 t = recalc_tags; 3229 for (i = 0; i < n_sectors; i += ic->sectors_per_block) { 3230 void *ptr = recalc_buffer + (i << SECTOR_SHIFT); 3231 void *ptr_page = integrity_identity(ic, (char *)ptr - offset_in_page(ptr)); 3232 unsigned ptr_offset = offset_in_page(ptr); 3233 integrity_sector_checksum(ic, &ahash_req, logical_sector + i, ptr_page, ptr_offset, t); 3234 t += ic->tag_size; 3235 } 3236 3237 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset); 3238 3239 r = dm_integrity_rw_tag(ic, recalc_tags, &metadata_block, &metadata_offset, t - recalc_tags, TAG_WRITE); 3240 if (unlikely(r)) { 3241 dm_integrity_io_error(ic, "writing tags", r); 3242 goto err; 3243 } 3244 3245 if (ic->mode == 'B') { 3246 sector_t start, end; 3247 3248 start = (range.logical_sector >> 3249 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) << 3250 
(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); 3251 end = ((range.logical_sector + range.n_sectors) >> 3252 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) << 3253 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); 3254 block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR); 3255 } 3256 3257 advance_and_next: 3258 cond_resched(); 3259 3260 spin_lock_irq(&ic->endio_wait.lock); 3261 remove_range_unlocked(ic, &range); 3262 ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors); 3263 goto next_chunk; 3264 3265 err: 3266 remove_range(ic, &range); 3267 goto free_ret; 3268 3269 unlock_ret: 3270 spin_unlock_irq(&ic->endio_wait.lock); 3271 3272 recalc_write_super(ic); 3273 3274 free_ret: 3275 kfree(recalc_buffer); 3276 kvfree(recalc_tags); 3277 mempool_free(ahash_req, &ic->ahash_req_pool); 3278 } 3279 3280 static void integrity_recalc_inline(struct work_struct *w) 3281 { 3282 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work); 3283 size_t recalc_tags_size; 3284 u8 *recalc_buffer = NULL; 3285 u8 *recalc_tags = NULL; 3286 struct ahash_request *ahash_req = NULL; 3287 struct dm_integrity_range range; 3288 struct bio *bio; 3289 struct bio_integrity_payload *bip; 3290 __u8 *t; 3291 unsigned int i; 3292 int r; 3293 unsigned ret; 3294 unsigned int super_counter = 0; 3295 unsigned recalc_sectors = RECALC_SECTORS; 3296 3297 retry: 3298 recalc_buffer = kmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO | __GFP_NOWARN); 3299 if (!recalc_buffer) { 3300 oom: 3301 recalc_sectors >>= 1; 3302 if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block) 3303 goto retry; 3304 DMCRIT("out of memory for recalculate buffer - recalculation disabled"); 3305 goto free_ret; 3306 } 3307 3308 recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tuple_size; 3309 if (ic->internal_hash_digestsize > ic->tuple_size) 3310 recalc_tags_size += ic->internal_hash_digestsize - ic->tuple_size; 3311 recalc_tags = kmalloc(recalc_tags_size, GFP_NOIO | __GFP_NOWARN); 3312 if (!recalc_tags) { 3313 kfree(recalc_buffer); 3314 recalc_buffer = NULL; 3315 goto oom; 3316 } 3317 3318 spin_lock_irq(&ic->endio_wait.lock); 3319 3320 next_chunk: 3321 if (unlikely(dm_post_suspending(ic->ti))) 3322 goto unlock_ret; 3323 3324 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); 3325 if (unlikely(range.logical_sector >= ic->provided_data_sectors)) 3326 goto unlock_ret; 3327 range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector); 3328 3329 add_new_range_and_wait(ic, &range); 3330 spin_unlock_irq(&ic->endio_wait.lock); 3331 3332 if (unlikely(++super_counter == RECALC_WRITE_SUPER)) { 3333 recalc_write_super(ic); 3334 super_counter = 0; 3335 } 3336 3337 if (unlikely(dm_integrity_failed(ic))) 3338 goto err; 3339 3340 DEBUG_print("recalculating: %llx - %llx\n", range.logical_sector, range.n_sectors); 3341 3342 bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recalc_bios); 3343 bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector; 3344 bio_add_virt_nofail(bio, recalc_buffer, 3345 range.n_sectors << SECTOR_SHIFT); 3346 r = submit_bio_wait(bio); 3347 bio_put(bio); 3348 if (unlikely(r)) { 3349 dm_integrity_io_error(ic, "reading data", r); 3350 goto err; 3351 } 3352 3353 t = recalc_tags; 3354 for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) { 3355 void *ptr = recalc_buffer + (i << SECTOR_SHIFT); 3356 void *ptr_page = 
integrity_identity(ic, (char *)ptr - offset_in_page(ptr)); 3357 unsigned ptr_offset = offset_in_page(ptr); 3358 memset(t, 0, ic->tuple_size); 3359 integrity_sector_checksum(ic, &ahash_req, range.logical_sector + i, ptr_page, ptr_offset, t); 3360 t += ic->tuple_size; 3361 } 3362 3363 bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_WRITE, GFP_NOIO, &ic->recalc_bios); 3364 bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector; 3365 bio_add_virt_nofail(bio, recalc_buffer, 3366 range.n_sectors << SECTOR_SHIFT); 3367 3368 bip = bio_integrity_alloc(bio, GFP_NOIO, 1); 3369 if (unlikely(IS_ERR(bip))) { 3370 bio_put(bio); 3371 DMCRIT("out of memory for bio integrity payload - recalculation disabled"); 3372 goto err; 3373 } 3374 ret = bio_integrity_add_page(bio, virt_to_page(recalc_tags), t - recalc_tags, offset_in_page(recalc_tags)); 3375 if (unlikely(ret != t - recalc_tags)) { 3376 bio_put(bio); 3377 dm_integrity_io_error(ic, "attaching integrity tags", -ENOMEM); 3378 goto err; 3379 } 3380 3381 r = submit_bio_wait(bio); 3382 bio_put(bio); 3383 if (unlikely(r)) { 3384 dm_integrity_io_error(ic, "writing data", r); 3385 goto err; 3386 } 3387 3388 cond_resched(); 3389 spin_lock_irq(&ic->endio_wait.lock); 3390 remove_range_unlocked(ic, &range); 3391 #ifdef CONFIG_64BIT 3392 /* Paired with smp_load_acquire in dm_integrity_map_inline. */ 3393 smp_store_release(&ic->sb->recalc_sector, cpu_to_le64(range.logical_sector + range.n_sectors)); 3394 #else 3395 ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors); 3396 #endif 3397 goto next_chunk; 3398 3399 err: 3400 remove_range(ic, &range); 3401 goto free_ret; 3402 3403 unlock_ret: 3404 spin_unlock_irq(&ic->endio_wait.lock); 3405 3406 recalc_write_super(ic); 3407 3408 free_ret: 3409 kfree(recalc_buffer); 3410 kfree(recalc_tags); 3411 mempool_free(ahash_req, &ic->ahash_req_pool); 3412 } 3413 3414 static void bitmap_block_work(struct work_struct *w) 3415 { 3416 struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work); 3417 struct dm_integrity_c *ic = bbs->ic; 3418 struct bio *bio; 3419 struct bio_list bio_queue; 3420 struct bio_list waiting; 3421 3422 bio_list_init(&waiting); 3423 3424 spin_lock(&bbs->bio_queue_lock); 3425 bio_queue = bbs->bio_queue; 3426 bio_list_init(&bbs->bio_queue); 3427 spin_unlock(&bbs->bio_queue_lock); 3428 3429 while ((bio = bio_list_pop(&bio_queue))) { 3430 struct dm_integrity_io *dio; 3431 3432 dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); 3433 3434 if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, 3435 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) { 3436 remove_range(ic, &dio->range); 3437 INIT_WORK(&dio->work, integrity_bio_wait); 3438 queue_work(ic->offload_wq, &dio->work); 3439 } else { 3440 block_bitmap_op(ic, ic->journal, dio->range.logical_sector, 3441 dio->range.n_sectors, BITMAP_OP_SET); 3442 bio_list_add(&waiting, bio); 3443 } 3444 } 3445 3446 if (bio_list_empty(&waiting)) 3447 return; 3448 3449 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 3450 bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), 3451 BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL); 3452 3453 while ((bio = bio_list_pop(&waiting))) { 3454 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); 3455 3456 block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, 3457 dio->range.n_sectors, BITMAP_OP_SET); 3458 3459 remove_range(ic, &dio->range); 3460 INIT_WORK(&dio->work, integrity_bio_wait); 3461 
queue_work(ic->offload_wq, &dio->work); 3462 } 3463 3464 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval); 3465 } 3466 3467 static void bitmap_flush_work(struct work_struct *work) 3468 { 3469 struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work); 3470 struct dm_integrity_range range; 3471 unsigned long limit; 3472 struct bio *bio; 3473 3474 dm_integrity_flush_buffers(ic, false); 3475 3476 range.logical_sector = 0; 3477 range.n_sectors = ic->provided_data_sectors; 3478 3479 spin_lock_irq(&ic->endio_wait.lock); 3480 add_new_range_and_wait(ic, &range); 3481 spin_unlock_irq(&ic->endio_wait.lock); 3482 3483 dm_integrity_flush_buffers(ic, true); 3484 3485 limit = ic->provided_data_sectors; 3486 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { 3487 limit = le64_to_cpu(ic->sb->recalc_sector) 3488 >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit) 3489 << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); 3490 } 3491 /*DEBUG_print("zeroing journal\n");*/ 3492 block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR); 3493 block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR); 3494 3495 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, 3496 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); 3497 3498 spin_lock_irq(&ic->endio_wait.lock); 3499 remove_range_unlocked(ic, &range); 3500 while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) { 3501 bio_endio(bio); 3502 spin_unlock_irq(&ic->endio_wait.lock); 3503 spin_lock_irq(&ic->endio_wait.lock); 3504 } 3505 spin_unlock_irq(&ic->endio_wait.lock); 3506 } 3507 3508 3509 static void init_journal(struct dm_integrity_c *ic, unsigned int start_section, 3510 unsigned int n_sections, unsigned char commit_seq) 3511 { 3512 unsigned int i, j, n; 3513 3514 if (!n_sections) 3515 return; 3516 3517 for (n = 0; n < n_sections; n++) { 3518 i = start_section + n; 3519 wraparound_section(ic, &i); 3520 for (j = 0; j < ic->journal_section_sectors; j++) { 3521 struct journal_sector *js = access_journal(ic, i, j); 3522 3523 BUILD_BUG_ON(sizeof(js->sectors) != JOURNAL_SECTOR_DATA); 3524 memset(&js->sectors, 0, sizeof(js->sectors)); 3525 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq); 3526 } 3527 for (j = 0; j < ic->journal_section_entries; j++) { 3528 struct journal_entry *je = access_journal_entry(ic, i, j); 3529 3530 journal_entry_set_unused(je); 3531 } 3532 } 3533 3534 write_journal(ic, start_section, n_sections); 3535 } 3536 3537 static int find_commit_seq(struct dm_integrity_c *ic, unsigned int i, unsigned int j, commit_id_t id) 3538 { 3539 unsigned char k; 3540 3541 for (k = 0; k < N_COMMIT_IDS; k++) { 3542 if (dm_integrity_commit_id(ic, i, j, k) == id) 3543 return k; 3544 } 3545 dm_integrity_io_error(ic, "journal commit id", -EIO); 3546 return -EIO; 3547 } 3548 3549 static void replay_journal(struct dm_integrity_c *ic) 3550 { 3551 unsigned int i, j; 3552 bool used_commit_ids[N_COMMIT_IDS]; 3553 unsigned int max_commit_id_sections[N_COMMIT_IDS]; 3554 unsigned int write_start, write_sections; 3555 unsigned int continue_section; 3556 bool journal_empty; 3557 unsigned char unused, last_used, want_commit_seq; 3558 3559 if (ic->mode == 'R') 3560 return; 3561 3562 if (ic->journal_uptodate) 3563 return; 3564 3565 last_used = 0; 3566 write_start = 0; 3567 3568 if (!ic->just_formatted) { 3569 DEBUG_print("reading journal\n"); 3570 rw_journal(ic, REQ_OP_READ, 0, ic->journal_sections, NULL); 
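/* If the journal is encrypted, the raw sectors were read into ic->journal_io; decrypt them into ic->journal before parsing. */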
3571 if (ic->journal_io) 3572 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal"); 3573 if (ic->journal_io) { 3574 struct journal_completion crypt_comp; 3575 3576 crypt_comp.ic = ic; 3577 init_completion(&crypt_comp.comp); 3578 crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0); 3579 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp); 3580 wait_for_completion(&crypt_comp.comp); 3581 } 3582 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal"); 3583 } 3584 3585 if (dm_integrity_failed(ic)) 3586 goto clear_journal; 3587 3588 journal_empty = true; 3589 memset(used_commit_ids, 0, sizeof(used_commit_ids)); 3590 memset(max_commit_id_sections, 0, sizeof(max_commit_id_sections)); 3591 for (i = 0; i < ic->journal_sections; i++) { 3592 for (j = 0; j < ic->journal_section_sectors; j++) { 3593 int k; 3594 struct journal_sector *js = access_journal(ic, i, j); 3595 3596 k = find_commit_seq(ic, i, j, js->commit_id); 3597 if (k < 0) 3598 goto clear_journal; 3599 used_commit_ids[k] = true; 3600 max_commit_id_sections[k] = i; 3601 } 3602 if (journal_empty) { 3603 for (j = 0; j < ic->journal_section_entries; j++) { 3604 struct journal_entry *je = access_journal_entry(ic, i, j); 3605 3606 if (!journal_entry_is_unused(je)) { 3607 journal_empty = false; 3608 break; 3609 } 3610 } 3611 } 3612 } 3613 3614 if (!used_commit_ids[N_COMMIT_IDS - 1]) { 3615 unused = N_COMMIT_IDS - 1; 3616 while (unused && !used_commit_ids[unused - 1]) 3617 unused--; 3618 } else { 3619 for (unused = 0; unused < N_COMMIT_IDS; unused++) 3620 if (!used_commit_ids[unused]) 3621 break; 3622 if (unused == N_COMMIT_IDS) { 3623 dm_integrity_io_error(ic, "journal commit ids", -EIO); 3624 goto clear_journal; 3625 } 3626 } 3627 DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n", 3628 unused, used_commit_ids[0], used_commit_ids[1], 3629 used_commit_ids[2], used_commit_ids[3]); 3630 3631 last_used = prev_commit_seq(unused); 3632 want_commit_seq = prev_commit_seq(last_used); 3633 3634 if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)]) 3635 journal_empty = true; 3636 3637 write_start = max_commit_id_sections[last_used] + 1; 3638 if (unlikely(write_start >= ic->journal_sections)) 3639 want_commit_seq = next_commit_seq(want_commit_seq); 3640 wraparound_section(ic, &write_start); 3641 3642 i = write_start; 3643 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) { 3644 for (j = 0; j < ic->journal_section_sectors; j++) { 3645 struct journal_sector *js = access_journal(ic, i, j); 3646 3647 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) { 3648 /* 3649 * This could be caused by crash during writing. 3650 * We won't replay the inconsistent part of the 3651 * journal. 
3652 */ 3653 DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n", 3654 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq); 3655 goto brk; 3656 } 3657 } 3658 i++; 3659 if (unlikely(i >= ic->journal_sections)) 3660 want_commit_seq = next_commit_seq(want_commit_seq); 3661 wraparound_section(ic, &i); 3662 } 3663 brk: 3664 3665 if (!journal_empty) { 3666 DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n", 3667 write_sections, write_start, want_commit_seq); 3668 do_journal_write(ic, write_start, write_sections, true); 3669 } 3670 3671 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) { 3672 continue_section = write_start; 3673 ic->commit_seq = want_commit_seq; 3674 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq); 3675 } else { 3676 unsigned int s; 3677 unsigned char erase_seq; 3678 3679 clear_journal: 3680 DEBUG_print("clearing journal\n"); 3681 3682 erase_seq = prev_commit_seq(prev_commit_seq(last_used)); 3683 s = write_start; 3684 init_journal(ic, s, 1, erase_seq); 3685 s++; 3686 wraparound_section(ic, &s); 3687 if (ic->journal_sections >= 2) { 3688 init_journal(ic, s, ic->journal_sections - 2, erase_seq); 3689 s += ic->journal_sections - 2; 3690 wraparound_section(ic, &s); 3691 init_journal(ic, s, 1, erase_seq); 3692 } 3693 3694 continue_section = 0; 3695 ic->commit_seq = next_commit_seq(erase_seq); 3696 } 3697 3698 ic->committed_section = continue_section; 3699 ic->n_committed_sections = 0; 3700 3701 ic->uncommitted_section = continue_section; 3702 ic->n_uncommitted_sections = 0; 3703 3704 ic->free_section = continue_section; 3705 ic->free_section_entry = 0; 3706 ic->free_sectors = ic->journal_entries; 3707 3708 ic->journal_tree_root = RB_ROOT; 3709 for (i = 0; i < ic->journal_entries; i++) 3710 init_journal_node(&ic->journal_tree[i]); 3711 } 3712 3713 static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic) 3714 { 3715 DEBUG_print("%s\n", __func__); 3716 3717 if (ic->mode == 'B') { 3718 ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1; 3719 ic->synchronous_mode = 1; 3720 3721 cancel_delayed_work_sync(&ic->bitmap_flush_work); 3722 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); 3723 flush_workqueue(ic->commit_wq); 3724 } 3725 } 3726 3727 static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x) 3728 { 3729 struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier); 3730 3731 DEBUG_print("%s\n", __func__); 3732 3733 dm_integrity_enter_synchronous_mode(ic); 3734 3735 return NOTIFY_DONE; 3736 } 3737 3738 static void dm_integrity_postsuspend(struct dm_target *ti) 3739 { 3740 struct dm_integrity_c *ic = ti->private; 3741 int r; 3742 3743 WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier)); 3744 3745 timer_delete_sync(&ic->autocommit_timer); 3746 3747 if (ic->recalc_wq) 3748 drain_workqueue(ic->recalc_wq); 3749 3750 if (ic->mode == 'B') 3751 cancel_delayed_work_sync(&ic->bitmap_flush_work); 3752 3753 queue_work(ic->commit_wq, &ic->commit_work); 3754 drain_workqueue(ic->commit_wq); 3755 3756 if (ic->mode == 'J') { 3757 queue_work(ic->writer_wq, &ic->writer_work); 3758 drain_workqueue(ic->writer_wq); 3759 dm_integrity_flush_buffers(ic, true); 3760 if (ic->wrote_to_journal) { 3761 init_journal(ic, ic->free_section, 3762 ic->journal_sections - ic->free_section, ic->commit_seq); 3763 if (ic->free_section) { 3764 init_journal(ic, 0, ic->free_section, 3765 next_commit_seq(ic->commit_seq)); 3766 
} 3767 } 3768 } 3769 3770 if (ic->mode == 'B') { 3771 dm_integrity_flush_buffers(ic, true); 3772 #if 1 3773 /* set to 0 to test bitmap replay code */ 3774 init_journal(ic, 0, ic->journal_sections, 0); 3775 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP); 3776 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); 3777 if (unlikely(r)) 3778 dm_integrity_io_error(ic, "writing superblock", r); 3779 #endif 3780 } 3781 3782 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); 3783 3784 ic->journal_uptodate = true; 3785 } 3786 3787 static void dm_integrity_resume(struct dm_target *ti) 3788 { 3789 struct dm_integrity_c *ic = ti->private; 3790 __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors); 3791 int r; 3792 __le32 flags; 3793 3794 DEBUG_print("resume\n"); 3795 3796 ic->wrote_to_journal = false; 3797 3798 flags = ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING); 3799 r = sync_rw_sb(ic, REQ_OP_READ); 3800 if (r) 3801 dm_integrity_io_error(ic, "reading superblock", r); 3802 if ((ic->sb->flags & flags) != flags) { 3803 ic->sb->flags |= flags; 3804 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); 3805 if (unlikely(r)) 3806 dm_integrity_io_error(ic, "writing superblock", r); 3807 } 3808 3809 if (ic->provided_data_sectors != old_provided_data_sectors) { 3810 if (ic->provided_data_sectors > old_provided_data_sectors && 3811 ic->mode == 'B' && 3812 ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP) && 3813 ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) { 3814 rw_journal_sectors(ic, REQ_OP_READ, 0, 3815 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); 3816 block_bitmap_op(ic, ic->journal, old_provided_data_sectors, 3817 ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET); 3818 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, 3819 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); 3820 } 3821 3822 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors); 3823 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); 3824 if (unlikely(r)) 3825 dm_integrity_io_error(ic, "writing superblock", r); 3826 } 3827 3828 if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) { 3829 DEBUG_print("resume dirty_bitmap\n"); 3830 rw_journal_sectors(ic, REQ_OP_READ, 0, 3831 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); 3832 if (ic->mode == 'B') { 3833 if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit && 3834 !ic->reset_recalculate_flag) { 3835 block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal); 3836 block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal); 3837 if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, 3838 BITMAP_OP_TEST_ALL_CLEAR)) { 3839 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); 3840 ic->sb->recalc_sector = cpu_to_le64(0); 3841 } 3842 } else { 3843 DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n", 3844 ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit); 3845 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; 3846 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET); 3847 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET); 3848 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET); 3849 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, 3850 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); 3851 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); 3852 ic->sb->recalc_sector = cpu_to_le64(0); 
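/*
 * The bitmap granularity changed, so all bits were set above and a full
 * recalculation is restarted from sector 0.
 */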
3853 } 3854 } else { 3855 if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit && 3856 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) || 3857 ic->reset_recalculate_flag) { 3858 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); 3859 ic->sb->recalc_sector = cpu_to_le64(0); 3860 } 3861 init_journal(ic, 0, ic->journal_sections, 0); 3862 replay_journal(ic); 3863 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP); 3864 } 3865 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); 3866 if (unlikely(r)) 3867 dm_integrity_io_error(ic, "writing superblock", r); 3868 } else { 3869 replay_journal(ic); 3870 if (ic->reset_recalculate_flag) { 3871 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); 3872 ic->sb->recalc_sector = cpu_to_le64(0); 3873 } 3874 if (ic->mode == 'B') { 3875 ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP); 3876 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; 3877 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); 3878 if (unlikely(r)) 3879 dm_integrity_io_error(ic, "writing superblock", r); 3880 3881 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); 3882 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); 3883 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); 3884 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && 3885 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) { 3886 block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector), 3887 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); 3888 block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector), 3889 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); 3890 block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector), 3891 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); 3892 } 3893 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, 3894 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); 3895 } 3896 } 3897 3898 DEBUG_print("testing recalc: %x\n", ic->sb->flags); 3899 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { 3900 __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector); 3901 3902 DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors); 3903 if (recalc_pos < ic->provided_data_sectors) { 3904 queue_work(ic->recalc_wq, &ic->recalc_work); 3905 } else if (recalc_pos > ic->provided_data_sectors) { 3906 ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors); 3907 recalc_write_super(ic); 3908 } 3909 } 3910 3911 ic->reboot_notifier.notifier_call = dm_integrity_reboot; 3912 ic->reboot_notifier.next = NULL; 3913 ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */ 3914 WARN_ON(register_reboot_notifier(&ic->reboot_notifier)); 3915 3916 #if 0 3917 /* set to 1 to stress test synchronous mode */ 3918 dm_integrity_enter_synchronous_mode(ic); 3919 #endif 3920 } 3921 3922 static void dm_integrity_status(struct dm_target *ti, status_type_t type, 3923 unsigned int status_flags, char *result, unsigned int maxlen) 3924 { 3925 struct dm_integrity_c *ic = ti->private; 3926 unsigned int arg_count; 3927 size_t sz = 0; 3928 3929 switch (type) { 3930 case STATUSTYPE_INFO: 3931 DMEMIT("%llu %llu", 3932 (unsigned long long)atomic64_read(&ic->number_of_mismatches), 3933 ic->provided_data_sectors); 3934 if 
(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) 3935 DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector)); 3936 else 3937 DMEMIT(" -"); 3938 break; 3939 3940 case STATUSTYPE_TABLE: { 3941 arg_count = 1; /* buffer_sectors */ 3942 arg_count += !!ic->meta_dev; 3943 arg_count += ic->sectors_per_block != 1; 3944 arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)); 3945 arg_count += ic->reset_recalculate_flag; 3946 arg_count += ic->discard; 3947 arg_count += ic->mode != 'I'; /* interleave_sectors */ 3948 arg_count += ic->mode == 'J'; /* journal_sectors */ 3949 arg_count += ic->mode == 'J'; /* journal_watermark */ 3950 arg_count += ic->mode == 'J'; /* commit_time */ 3951 arg_count += ic->mode == 'B'; /* sectors_per_bit */ 3952 arg_count += ic->mode == 'B'; /* bitmap_flush_interval */ 3953 arg_count += !!ic->internal_hash_alg.alg_string; 3954 arg_count += !!ic->journal_crypt_alg.alg_string; 3955 arg_count += !!ic->journal_mac_alg.alg_string; 3956 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0; 3957 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0; 3958 arg_count += ic->legacy_recalculate; 3959 DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start, 3960 ic->tag_size, ic->mode, arg_count); 3961 if (ic->meta_dev) 3962 DMEMIT(" meta_device:%s", ic->meta_dev->name); 3963 if (ic->sectors_per_block != 1) 3964 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT); 3965 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) 3966 DMEMIT(" recalculate"); 3967 if (ic->reset_recalculate_flag) 3968 DMEMIT(" reset_recalculate"); 3969 if (ic->discard) 3970 DMEMIT(" allow_discards"); 3971 if (ic->mode != 'I') 3972 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors); 3973 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors); 3974 if (ic->mode == 'J') { 3975 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100; 3976 3977 watermark_percentage += ic->journal_entries / 2; 3978 do_div(watermark_percentage, ic->journal_entries); 3979 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS); 3980 DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage); 3981 DMEMIT(" commit_time:%u", ic->autocommit_msec); 3982 } 3983 if (ic->mode == 'B') { 3984 DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit); 3985 DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval)); 3986 } 3987 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) 3988 DMEMIT(" fix_padding"); 3989 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0) 3990 DMEMIT(" fix_hmac"); 3991 if (ic->legacy_recalculate) 3992 DMEMIT(" legacy_recalculate"); 3993 3994 #define EMIT_ALG(a, n) \ 3995 do { \ 3996 if (ic->a.alg_string) { \ 3997 DMEMIT(" %s:%s", n, ic->a.alg_string); \ 3998 if (ic->a.key_string) \ 3999 DMEMIT(":%s", ic->a.key_string);\ 4000 } \ 4001 } while (0) 4002 EMIT_ALG(internal_hash_alg, "internal_hash"); 4003 EMIT_ALG(journal_crypt_alg, "journal_crypt"); 4004 EMIT_ALG(journal_mac_alg, "journal_mac"); 4005 break; 4006 } 4007 case STATUSTYPE_IMA: 4008 DMEMIT_TARGET_NAME_VERSION(ti->type); 4009 DMEMIT(",dev_name=%s,start=%llu,tag_size=%u,mode=%c", 4010 ic->dev->name, ic->start, ic->tag_size, ic->mode); 4011 4012 if (ic->meta_dev) 4013 DMEMIT(",meta_device=%s", ic->meta_dev->name); 4014 if (ic->sectors_per_block != 1) 4015 DMEMIT(",block_size=%u", ic->sectors_per_block << SECTOR_SHIFT); 4016 4017 DMEMIT(",recalculate=%c", 
(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) ? 4018 'y' : 'n'); 4019 DMEMIT(",allow_discards=%c", ic->discard ? 'y' : 'n'); 4020 DMEMIT(",fix_padding=%c", 4021 ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) ? 'y' : 'n'); 4022 DMEMIT(",fix_hmac=%c", 4023 ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0) ? 'y' : 'n'); 4024 DMEMIT(",legacy_recalculate=%c", ic->legacy_recalculate ? 'y' : 'n'); 4025 4026 DMEMIT(",journal_sectors=%u", ic->initial_sectors - SB_SECTORS); 4027 DMEMIT(",interleave_sectors=%u", 1U << ic->sb->log2_interleave_sectors); 4028 DMEMIT(",buffer_sectors=%u", 1U << ic->log2_buffer_sectors); 4029 DMEMIT(";"); 4030 break; 4031 } 4032 } 4033 4034 static int dm_integrity_iterate_devices(struct dm_target *ti, 4035 iterate_devices_callout_fn fn, void *data) 4036 { 4037 struct dm_integrity_c *ic = ti->private; 4038 4039 if (!ic->meta_dev) 4040 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data); 4041 else 4042 return fn(ti, ic->dev, 0, ti->len, data); 4043 } 4044 4045 static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits) 4046 { 4047 struct dm_integrity_c *ic = ti->private; 4048 4049 if (ic->sectors_per_block > 1) { 4050 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT; 4051 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT; 4052 limits->io_min = ic->sectors_per_block << SECTOR_SHIFT; 4053 limits->dma_alignment = limits->logical_block_size - 1; 4054 limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT; 4055 } 4056 4057 if (!ic->internal_hash) { 4058 struct blk_integrity *bi = &limits->integrity; 4059 4060 memset(bi, 0, sizeof(*bi)); 4061 bi->metadata_size = ic->tag_size; 4062 bi->tag_size = bi->metadata_size; 4063 bi->interval_exp = 4064 ic->sb->log2_sectors_per_block + SECTOR_SHIFT; 4065 } 4066 4067 limits->max_integrity_segments = USHRT_MAX; 4068 } 4069 4070 static void calculate_journal_section_size(struct dm_integrity_c *ic) 4071 { 4072 unsigned int sector_space = JOURNAL_SECTOR_DATA; 4073 4074 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections); 4075 ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size, 4076 JOURNAL_ENTRY_ROUNDUP); 4077 4078 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) 4079 sector_space -= JOURNAL_MAC_PER_SECTOR; 4080 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size; 4081 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS; 4082 ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS; 4083 ic->journal_entries = ic->journal_section_entries * ic->journal_sections; 4084 } 4085 4086 static int calculate_device_limits(struct dm_integrity_c *ic) 4087 { 4088 __u64 initial_sectors; 4089 4090 calculate_journal_section_size(ic); 4091 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections; 4092 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX) 4093 return -EINVAL; 4094 ic->initial_sectors = initial_sectors; 4095 4096 if (ic->mode == 'I') { 4097 if (ic->initial_sectors + ic->provided_data_sectors > ic->meta_device_sectors) 4098 return -EINVAL; 4099 } else if (!ic->meta_dev) { 4100 sector_t last_sector, last_area, last_offset; 4101 4102 /* we have to maintain excessive padding for compatibility with existing volumes */ 4103 __u64 
metadata_run_padding = 4104 ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ? 4105 (__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) : 4106 (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS); 4107 4108 ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block), 4109 metadata_run_padding) >> SECTOR_SHIFT; 4110 if (!(ic->metadata_run & (ic->metadata_run - 1))) 4111 ic->log2_metadata_run = __ffs(ic->metadata_run); 4112 else 4113 ic->log2_metadata_run = -1; 4114 4115 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset); 4116 last_sector = get_data_sector(ic, last_area, last_offset); 4117 if (last_sector < ic->start || last_sector >= ic->meta_device_sectors) 4118 return -EINVAL; 4119 } else { 4120 __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size; 4121 4122 meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1)) 4123 >> (ic->log2_buffer_sectors + SECTOR_SHIFT); 4124 meta_size <<= ic->log2_buffer_sectors; 4125 if (ic->initial_sectors + meta_size < ic->initial_sectors || 4126 ic->initial_sectors + meta_size > ic->meta_device_sectors) 4127 return -EINVAL; 4128 ic->metadata_run = 1; 4129 ic->log2_metadata_run = 0; 4130 } 4131 4132 return 0; 4133 } 4134 4135 static void get_provided_data_sectors(struct dm_integrity_c *ic) 4136 { 4137 if (!ic->meta_dev) { 4138 int test_bit; 4139 4140 ic->provided_data_sectors = 0; 4141 for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) { 4142 __u64 prev_data_sectors = ic->provided_data_sectors; 4143 4144 ic->provided_data_sectors |= (sector_t)1 << test_bit; 4145 if (calculate_device_limits(ic)) 4146 ic->provided_data_sectors = prev_data_sectors; 4147 } 4148 } else { 4149 ic->provided_data_sectors = ic->data_device_sectors; 4150 ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1); 4151 } 4152 } 4153 4154 static int initialize_superblock(struct dm_integrity_c *ic, 4155 unsigned int journal_sectors, unsigned int interleave_sectors) 4156 { 4157 unsigned int journal_sections; 4158 int test_bit; 4159 4160 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT); 4161 memcpy(ic->sb->magic, SB_MAGIC, 8); 4162 if (ic->mode == 'I') 4163 ic->sb->flags |= cpu_to_le32(SB_FLAG_INLINE); 4164 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size); 4165 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block); 4166 if (ic->journal_mac_alg.alg_string) 4167 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC); 4168 4169 calculate_journal_section_size(ic); 4170 journal_sections = journal_sectors / ic->journal_section_sectors; 4171 if (!journal_sections) 4172 journal_sections = 1; 4173 if (ic->mode == 'I') 4174 journal_sections = 0; 4175 4176 if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) { 4177 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC); 4178 get_random_bytes(ic->sb->salt, SALT_SIZE); 4179 } 4180 4181 if (!ic->meta_dev) { 4182 if (ic->fix_padding) 4183 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING); 4184 ic->sb->journal_sections = cpu_to_le32(journal_sections); 4185 if (!interleave_sectors) 4186 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS; 4187 ic->sb->log2_interleave_sectors = __fls(interleave_sectors); 4188 ic->sb->log2_interleave_sectors = max_t(__u8, MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors); 4189 ic->sb->log2_interleave_sectors = min_t(__u8, MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors); 
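/*
 * With the interleave settled, get_provided_data_sectors() below probes,
 * bit by bit from the top, for the largest data size that still leaves
 * room for the interleaved metadata on the same device.
 */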
4190 4191 get_provided_data_sectors(ic); 4192 if (!ic->provided_data_sectors) 4193 return -EINVAL; 4194 } else { 4195 ic->sb->log2_interleave_sectors = 0; 4196 4197 get_provided_data_sectors(ic); 4198 if (!ic->provided_data_sectors) 4199 return -EINVAL; 4200 4201 try_smaller_buffer: 4202 ic->sb->journal_sections = cpu_to_le32(0); 4203 for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) { 4204 __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections); 4205 __u32 test_journal_sections = prev_journal_sections | (1U << test_bit); 4206 4207 if (test_journal_sections > journal_sections) 4208 continue; 4209 ic->sb->journal_sections = cpu_to_le32(test_journal_sections); 4210 if (calculate_device_limits(ic)) 4211 ic->sb->journal_sections = cpu_to_le32(prev_journal_sections); 4212 4213 } 4214 if (!le32_to_cpu(ic->sb->journal_sections)) { 4215 if (ic->log2_buffer_sectors > 3) { 4216 ic->log2_buffer_sectors--; 4217 goto try_smaller_buffer; 4218 } 4219 return -EINVAL; 4220 } 4221 } 4222 4223 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors); 4224 4225 sb_set_version(ic); 4226 4227 return 0; 4228 } 4229 4230 static void dm_integrity_free_page_list(struct page_list *pl) 4231 { 4232 unsigned int i; 4233 4234 if (!pl) 4235 return; 4236 for (i = 0; pl[i].page; i++) 4237 __free_page(pl[i].page); 4238 kvfree(pl); 4239 } 4240 4241 static struct page_list *dm_integrity_alloc_page_list(unsigned int n_pages) 4242 { 4243 struct page_list *pl; 4244 unsigned int i; 4245 4246 pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO); 4247 if (!pl) 4248 return NULL; 4249 4250 for (i = 0; i < n_pages; i++) { 4251 pl[i].page = alloc_page(GFP_KERNEL); 4252 if (!pl[i].page) { 4253 dm_integrity_free_page_list(pl); 4254 return NULL; 4255 } 4256 if (i) 4257 pl[i - 1].next = &pl[i]; 4258 } 4259 pl[i].page = NULL; 4260 pl[i].next = NULL; 4261 4262 return pl; 4263 } 4264 4265 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl) 4266 { 4267 unsigned int i; 4268 4269 for (i = 0; i < ic->journal_sections; i++) 4270 kvfree(sl[i]); 4271 kvfree(sl); 4272 } 4273 4274 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, 4275 struct page_list *pl) 4276 { 4277 struct scatterlist **sl; 4278 unsigned int i; 4279 4280 sl = kvmalloc_array(ic->journal_sections, 4281 sizeof(struct scatterlist *), 4282 GFP_KERNEL | __GFP_ZERO); 4283 if (!sl) 4284 return NULL; 4285 4286 for (i = 0; i < ic->journal_sections; i++) { 4287 struct scatterlist *s; 4288 unsigned int start_index, start_offset; 4289 unsigned int end_index, end_offset; 4290 unsigned int n_pages; 4291 unsigned int idx; 4292 4293 page_list_location(ic, i, 0, &start_index, &start_offset); 4294 page_list_location(ic, i, ic->journal_section_sectors - 1, 4295 &end_index, &end_offset); 4296 4297 n_pages = (end_index - start_index + 1); 4298 4299 s = kvmalloc_array(n_pages, sizeof(struct scatterlist), 4300 GFP_KERNEL); 4301 if (!s) { 4302 dm_integrity_free_journal_scatterlist(ic, sl); 4303 return NULL; 4304 } 4305 4306 sg_init_table(s, n_pages); 4307 for (idx = start_index; idx <= end_index; idx++) { 4308 char *va = lowmem_page_address(pl[idx].page); 4309 unsigned int start = 0, end = PAGE_SIZE; 4310 4311 if (idx == start_index) 4312 start = start_offset; 4313 if (idx == end_index) 4314 end = end_offset + (1 << SECTOR_SHIFT); 4315 sg_set_buf(&s[idx - start_index], va + start, end - start); 4316 } 4317 4318 sl[i] = s; 4319 } 4320 4321 
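/* Each journal section now has a scatterlist covering its sectors within the preallocated journal pages. */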
return sl; 4322 } 4323 4324 static void free_alg(struct alg_spec *a) 4325 { 4326 kfree_sensitive(a->alg_string); 4327 kfree_sensitive(a->key); 4328 memset(a, 0, sizeof(*a)); 4329 } 4330 4331 static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval) 4332 { 4333 char *k; 4334 4335 free_alg(a); 4336 4337 a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL); 4338 if (!a->alg_string) 4339 goto nomem; 4340 4341 k = strchr(a->alg_string, ':'); 4342 if (k) { 4343 *k = 0; 4344 a->key_string = k + 1; 4345 if (strlen(a->key_string) & 1) 4346 goto inval; 4347 4348 a->key_size = strlen(a->key_string) / 2; 4349 a->key = kmalloc(a->key_size, GFP_KERNEL); 4350 if (!a->key) 4351 goto nomem; 4352 if (hex2bin(a->key, a->key_string, a->key_size)) 4353 goto inval; 4354 } 4355 4356 return 0; 4357 inval: 4358 *error = error_inval; 4359 return -EINVAL; 4360 nomem: 4361 *error = "Out of memory for an argument"; 4362 return -ENOMEM; 4363 } 4364 4365 static int get_mac(struct crypto_shash **shash, struct crypto_ahash **ahash, 4366 struct alg_spec *a, char **error, char *error_alg, char *error_key) 4367 { 4368 int r; 4369 4370 if (a->alg_string) { 4371 if (shash) { 4372 *shash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY); 4373 if (IS_ERR(*shash)) { 4374 *shash = NULL; 4375 goto try_ahash; 4376 } 4377 if (a->key) { 4378 r = crypto_shash_setkey(*shash, a->key, a->key_size); 4379 if (r) { 4380 *error = error_key; 4381 return r; 4382 } 4383 } else if (crypto_shash_get_flags(*shash) & CRYPTO_TFM_NEED_KEY) { 4384 *error = error_key; 4385 return -ENOKEY; 4386 } 4387 return 0; 4388 } 4389 try_ahash: 4390 if (ahash) { 4391 *ahash = crypto_alloc_ahash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY); 4392 if (IS_ERR(*ahash)) { 4393 *error = error_alg; 4394 r = PTR_ERR(*ahash); 4395 *ahash = NULL; 4396 return r; 4397 } 4398 if (a->key) { 4399 r = crypto_ahash_setkey(*ahash, a->key, a->key_size); 4400 if (r) { 4401 *error = error_key; 4402 return r; 4403 } 4404 } else if (crypto_ahash_get_flags(*ahash) & CRYPTO_TFM_NEED_KEY) { 4405 *error = error_key; 4406 return -ENOKEY; 4407 } 4408 return 0; 4409 } 4410 *error = error_alg; 4411 return -ENOENT; 4412 } 4413 4414 return 0; 4415 } 4416 4417 static int create_journal(struct dm_integrity_c *ic, char **error) 4418 { 4419 int r = 0; 4420 unsigned int i; 4421 __u64 journal_pages, journal_desc_size, journal_tree_size; 4422 unsigned char *crypt_data = NULL, *crypt_iv = NULL; 4423 struct skcipher_request *req = NULL; 4424 4425 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL); 4426 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL); 4427 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL); 4428 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL); 4429 4430 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors, 4431 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT); 4432 journal_desc_size = journal_pages * sizeof(struct page_list); 4433 if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) { 4434 *error = "Journal doesn't fit into memory"; 4435 r = -ENOMEM; 4436 goto bad; 4437 } 4438 ic->journal_pages = journal_pages; 4439 4440 ic->journal = dm_integrity_alloc_page_list(ic->journal_pages); 4441 if (!ic->journal) { 4442 *error = "Could not allocate memory for journal"; 4443 r = -ENOMEM; 4444 goto bad; 4445 } 4446 if (ic->journal_crypt_alg.alg_string) { 4447 unsigned int ivsize, blocksize; 4448 struct journal_completion comp; 4449 4450 
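/*
 * Two journal encryption schemes follow: a stream cipher (blocksize == 1)
 * is used once to generate a static xor pad (ic->journal_xor), while a
 * block cipher gets a preallocated request and a derived IV per journal
 * section (ic->sk_requests).
 */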
comp.ic = ic; 4451 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY); 4452 if (IS_ERR(ic->journal_crypt)) { 4453 *error = "Invalid journal cipher"; 4454 r = PTR_ERR(ic->journal_crypt); 4455 ic->journal_crypt = NULL; 4456 goto bad; 4457 } 4458 ivsize = crypto_skcipher_ivsize(ic->journal_crypt); 4459 blocksize = crypto_skcipher_blocksize(ic->journal_crypt); 4460 4461 if (ic->journal_crypt_alg.key) { 4462 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key, 4463 ic->journal_crypt_alg.key_size); 4464 if (r) { 4465 *error = "Error setting encryption key"; 4466 goto bad; 4467 } 4468 } 4469 DEBUG_print("cipher %s, block size %u iv size %u\n", 4470 ic->journal_crypt_alg.alg_string, blocksize, ivsize); 4471 4472 ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages); 4473 if (!ic->journal_io) { 4474 *error = "Could not allocate memory for journal io"; 4475 r = -ENOMEM; 4476 goto bad; 4477 } 4478 4479 if (blocksize == 1) { 4480 struct scatterlist *sg; 4481 4482 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); 4483 if (!req) { 4484 *error = "Could not allocate crypt request"; 4485 r = -ENOMEM; 4486 goto bad; 4487 } 4488 4489 crypt_iv = kzalloc(ivsize, GFP_KERNEL); 4490 if (!crypt_iv) { 4491 *error = "Could not allocate iv"; 4492 r = -ENOMEM; 4493 goto bad; 4494 } 4495 4496 ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages); 4497 if (!ic->journal_xor) { 4498 *error = "Could not allocate memory for journal xor"; 4499 r = -ENOMEM; 4500 goto bad; 4501 } 4502 4503 sg = kvmalloc_array(ic->journal_pages + 1, 4504 sizeof(struct scatterlist), 4505 GFP_KERNEL); 4506 if (!sg) { 4507 *error = "Unable to allocate sg list"; 4508 r = -ENOMEM; 4509 goto bad; 4510 } 4511 sg_init_table(sg, ic->journal_pages + 1); 4512 for (i = 0; i < ic->journal_pages; i++) { 4513 char *va = lowmem_page_address(ic->journal_xor[i].page); 4514 4515 clear_page(va); 4516 sg_set_buf(&sg[i], va, PAGE_SIZE); 4517 } 4518 sg_set_buf(&sg[i], &ic->commit_ids, sizeof(ic->commit_ids)); 4519 4520 skcipher_request_set_crypt(req, sg, sg, 4521 PAGE_SIZE * ic->journal_pages + sizeof(ic->commit_ids), crypt_iv); 4522 init_completion(&comp.comp); 4523 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 4524 if (do_crypt(true, req, &comp)) 4525 wait_for_completion(&comp.comp); 4526 kvfree(sg); 4527 r = dm_integrity_failed(ic); 4528 if (r) { 4529 *error = "Unable to encrypt journal"; 4530 goto bad; 4531 } 4532 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data"); 4533 4534 crypto_free_skcipher(ic->journal_crypt); 4535 ic->journal_crypt = NULL; 4536 } else { 4537 unsigned int crypt_len = roundup(ivsize, blocksize); 4538 4539 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); 4540 if (!req) { 4541 *error = "Could not allocate crypt request"; 4542 r = -ENOMEM; 4543 goto bad; 4544 } 4545 4546 crypt_iv = kmalloc(ivsize, GFP_KERNEL); 4547 if (!crypt_iv) { 4548 *error = "Could not allocate iv"; 4549 r = -ENOMEM; 4550 goto bad; 4551 } 4552 4553 crypt_data = kmalloc(crypt_len, GFP_KERNEL); 4554 if (!crypt_data) { 4555 *error = "Unable to allocate crypt data"; 4556 r = -ENOMEM; 4557 goto bad; 4558 } 4559 4560 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal); 4561 if (!ic->journal_scatterlist) { 4562 *error = "Unable to allocate sg list"; 4563 r = -ENOMEM; 4564 goto bad; 4565 } 4566 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io); 4567 if 
(!ic->journal_io_scatterlist) {
4568 *error = "Unable to allocate sg list";
4569 r = -ENOMEM;
4570 goto bad;
4571 }
4572 ic->sk_requests = kvmalloc_array(ic->journal_sections,
4573 sizeof(struct skcipher_request *),
4574 GFP_KERNEL | __GFP_ZERO);
4575 if (!ic->sk_requests) {
4576 *error = "Unable to allocate sk requests";
4577 r = -ENOMEM;
4578 goto bad;
4579 }
4580 for (i = 0; i < ic->journal_sections; i++) {
4581 struct scatterlist sg;
4582 struct skcipher_request *section_req;
4583 __le32 section_le = cpu_to_le32(i);
4584
4585 memset(crypt_iv, 0x00, ivsize);
4586 memset(crypt_data, 0x00, crypt_len);
4587 memcpy(crypt_data, &section_le, min_t(size_t, crypt_len, sizeof(section_le)));
4588
4589 sg_init_one(&sg, crypt_data, crypt_len);
4590 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
4591 init_completion(&comp.comp);
4592 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
4593 if (do_crypt(true, req, &comp))
4594 wait_for_completion(&comp.comp);
4595
4596 r = dm_integrity_failed(ic);
4597 if (r) {
4598 *error = "Unable to generate iv";
4599 goto bad;
4600 }
4601
4602 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
4603 if (!section_req) {
4604 *error = "Unable to allocate crypt request";
4605 r = -ENOMEM;
4606 goto bad;
4607 }
4608 section_req->iv = kmalloc_array(ivsize, 2,
4609 GFP_KERNEL);
4610 if (!section_req->iv) {
4611 skcipher_request_free(section_req);
4612 *error = "Unable to allocate iv";
4613 r = -ENOMEM;
4614 goto bad;
4615 }
4616 memcpy(section_req->iv + ivsize, crypt_data, ivsize);
4617 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
4618 ic->sk_requests[i] = section_req;
4619 DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
4620 }
4621 }
4622 }
4623
4624 for (i = 0; i < N_COMMIT_IDS; i++) {
4625 unsigned int j;
4626
4627 retest_commit_id:
4628 for (j = 0; j < i; j++) {
4629 if (ic->commit_ids[j] == ic->commit_ids[i]) {
4630 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
4631 goto retest_commit_id;
4632 }
4633 }
4634 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
4635 }
4636
4637 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
4638 if (journal_tree_size > ULONG_MAX) {
4639 *error = "Journal doesn't fit into memory";
4640 r = -ENOMEM;
4641 goto bad;
4642 }
4643 ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
4644 if (!ic->journal_tree) {
4645 *error = "Could not allocate memory for journal tree";
4646 r = -ENOMEM;
4647 }
4648 bad:
4649 kfree(crypt_data);
4650 kfree(crypt_iv);
4651 skcipher_request_free(req);
4652
4653 return r;
4654 }
4655
4656 /*
4657 * Construct an integrity mapping
4658 *
4659 * Arguments:
4660 * device
4661 * offset from the start of the device
4662 * tag size
4663 * D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode, I - inline mode
4664 * number of optional arguments
4665 * optional arguments:
4666 * journal_sectors
4667 * interleave_sectors
4668 * buffer_sectors
4669 * journal_watermark
4670 * commit_time
4671 * meta_device
4672 * block_size
4673 * sectors_per_bit
4674 * bitmap_flush_interval
4675 * internal_hash
4676 * journal_crypt
4677 * journal_mac
4678 * recalculate
4679 */
4680 static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
4681 {
4682 struct dm_integrity_c *ic;
4683 char dummy;
4684 int r;
4685 unsigned int extra_args;
4686 struct dm_arg_set as;
4687 static const struct dm_arg _args[] = {
4688 {0, 18, "Invalid number of feature args"},
4689 };
4690 unsigned int
journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec; 4691 bool should_write_sb; 4692 __u64 threshold; 4693 unsigned long long start; 4694 __s8 log2_sectors_per_bitmap_bit = -1; 4695 __s8 log2_blocks_per_bitmap_bit; 4696 __u64 bits_in_journal; 4697 __u64 n_bitmap_bits; 4698 4699 #define DIRECT_ARGUMENTS 4 4700 4701 if (argc <= DIRECT_ARGUMENTS) { 4702 ti->error = "Invalid argument count"; 4703 return -EINVAL; 4704 } 4705 4706 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL); 4707 if (!ic) { 4708 ti->error = "Cannot allocate integrity context"; 4709 return -ENOMEM; 4710 } 4711 ti->private = ic; 4712 ti->per_io_data_size = sizeof(struct dm_integrity_io); 4713 ic->ti = ti; 4714 4715 ic->in_progress = RB_ROOT; 4716 INIT_LIST_HEAD(&ic->wait_list); 4717 init_waitqueue_head(&ic->endio_wait); 4718 bio_list_init(&ic->flush_bio_list); 4719 init_waitqueue_head(&ic->copy_to_journal_wait); 4720 init_completion(&ic->crypto_backoff); 4721 atomic64_set(&ic->number_of_mismatches, 0); 4722 ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL; 4723 4724 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); 4725 if (r) { 4726 ti->error = "Device lookup failed"; 4727 goto bad; 4728 } 4729 4730 if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) { 4731 ti->error = "Invalid starting offset"; 4732 r = -EINVAL; 4733 goto bad; 4734 } 4735 ic->start = start; 4736 4737 if (strcmp(argv[2], "-")) { 4738 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) { 4739 ti->error = "Invalid tag size"; 4740 r = -EINVAL; 4741 goto bad; 4742 } 4743 } 4744 4745 if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") || 4746 !strcmp(argv[3], "D") || !strcmp(argv[3], "R") || 4747 !strcmp(argv[3], "I")) { 4748 ic->mode = argv[3][0]; 4749 } else { 4750 ti->error = "Invalid mode (expecting J, B, D, R, I)"; 4751 r = -EINVAL; 4752 goto bad; 4753 } 4754 4755 journal_sectors = 0; 4756 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS; 4757 buffer_sectors = DEFAULT_BUFFER_SECTORS; 4758 journal_watermark = DEFAULT_JOURNAL_WATERMARK; 4759 sync_msec = DEFAULT_SYNC_MSEC; 4760 ic->sectors_per_block = 1; 4761 4762 as.argc = argc - DIRECT_ARGUMENTS; 4763 as.argv = argv + DIRECT_ARGUMENTS; 4764 r = dm_read_arg_group(_args, &as, &extra_args, &ti->error); 4765 if (r) 4766 goto bad; 4767 4768 while (extra_args--) { 4769 const char *opt_string; 4770 unsigned int val; 4771 unsigned long long llval; 4772 4773 opt_string = dm_shift_arg(&as); 4774 if (!opt_string) { 4775 r = -EINVAL; 4776 ti->error = "Not enough feature arguments"; 4777 goto bad; 4778 } 4779 if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1) 4780 journal_sectors = val ? 
val : 1; 4781 else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1) 4782 interleave_sectors = val; 4783 else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1) 4784 buffer_sectors = val; 4785 else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100) 4786 journal_watermark = val; 4787 else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1) 4788 sync_msec = val; 4789 else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) { 4790 if (ic->meta_dev) { 4791 dm_put_device(ti, ic->meta_dev); 4792 ic->meta_dev = NULL; 4793 } 4794 r = dm_get_device(ti, strchr(opt_string, ':') + 1, 4795 dm_table_get_mode(ti->table), &ic->meta_dev); 4796 if (r) { 4797 ti->error = "Device lookup failed"; 4798 goto bad; 4799 } 4800 } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) { 4801 if (val < 1 << SECTOR_SHIFT || 4802 val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT || 4803 (val & (val - 1))) { 4804 r = -EINVAL; 4805 ti->error = "Invalid block_size argument"; 4806 goto bad; 4807 } 4808 ic->sectors_per_block = val >> SECTOR_SHIFT; 4809 } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) { 4810 log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval); 4811 } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) { 4812 if ((uint64_t)val >= (uint64_t)UINT_MAX * 1000 / HZ) { 4813 r = -EINVAL; 4814 ti->error = "Invalid bitmap_flush_interval argument"; 4815 goto bad; 4816 } 4817 ic->bitmap_flush_interval = msecs_to_jiffies(val); 4818 } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { 4819 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, 4820 "Invalid internal_hash argument"); 4821 if (r) 4822 goto bad; 4823 } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { 4824 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, 4825 "Invalid journal_crypt argument"); 4826 if (r) 4827 goto bad; 4828 } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { 4829 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, 4830 "Invalid journal_mac argument"); 4831 if (r) 4832 goto bad; 4833 } else if (!strcmp(opt_string, "recalculate")) { 4834 ic->recalculate_flag = true; 4835 } else if (!strcmp(opt_string, "reset_recalculate")) { 4836 ic->recalculate_flag = true; 4837 ic->reset_recalculate_flag = true; 4838 } else if (!strcmp(opt_string, "allow_discards")) { 4839 ic->discard = true; 4840 } else if (!strcmp(opt_string, "fix_padding")) { 4841 ic->fix_padding = true; 4842 } else if (!strcmp(opt_string, "fix_hmac")) { 4843 ic->fix_hmac = true; 4844 } else if (!strcmp(opt_string, "legacy_recalculate")) { 4845 ic->legacy_recalculate = true; 4846 } else { 4847 r = -EINVAL; 4848 ti->error = "Invalid argument"; 4849 goto bad; 4850 } 4851 } 4852 4853 ic->data_device_sectors = bdev_nr_sectors(ic->dev->bdev); 4854 if (!ic->meta_dev) 4855 ic->meta_device_sectors = ic->data_device_sectors; 4856 else 4857 ic->meta_device_sectors = bdev_nr_sectors(ic->meta_dev->bdev); 4858 4859 if (!journal_sectors) { 4860 journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS, 4861 ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR); 4862 } 4863 4864 if (!buffer_sectors) 4865 buffer_sectors = 1; 4866 ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT); 4867 4868 r = get_mac(&ic->internal_shash, &ic->internal_ahash, &ic->internal_hash_alg, &ti->error, 4869 "Invalid 
internal hash", "Error setting internal hash key"); 4870 if (r) 4871 goto bad; 4872 if (ic->internal_shash) { 4873 ic->internal_hash = true; 4874 ic->internal_hash_digestsize = crypto_shash_digestsize(ic->internal_shash); 4875 } 4876 if (ic->internal_ahash) { 4877 ic->internal_hash = true; 4878 ic->internal_hash_digestsize = crypto_ahash_digestsize(ic->internal_ahash); 4879 r = mempool_init_kmalloc_pool(&ic->ahash_req_pool, AHASH_MEMPOOL, 4880 sizeof(struct ahash_request) + crypto_ahash_reqsize(ic->internal_ahash)); 4881 if (r) { 4882 ti->error = "Cannot allocate mempool"; 4883 goto bad; 4884 } 4885 } 4886 4887 r = get_mac(&ic->journal_mac, NULL, &ic->journal_mac_alg, &ti->error, 4888 "Invalid journal mac", "Error setting journal mac key"); 4889 if (r) 4890 goto bad; 4891 4892 if (!ic->tag_size) { 4893 if (!ic->internal_hash) { 4894 ti->error = "Unknown tag size"; 4895 r = -EINVAL; 4896 goto bad; 4897 } 4898 ic->tag_size = ic->internal_hash_digestsize; 4899 } 4900 if (ic->tag_size > MAX_TAG_SIZE) { 4901 ti->error = "Too big tag size"; 4902 r = -EINVAL; 4903 goto bad; 4904 } 4905 if (!(ic->tag_size & (ic->tag_size - 1))) 4906 ic->log2_tag_size = __ffs(ic->tag_size); 4907 else 4908 ic->log2_tag_size = -1; 4909 4910 if (ic->mode == 'I') { 4911 struct blk_integrity *bi; 4912 if (ic->meta_dev) { 4913 r = -EINVAL; 4914 ti->error = "Metadata device not supported in inline mode"; 4915 goto bad; 4916 } 4917 if (!ic->internal_hash_alg.alg_string) { 4918 r = -EINVAL; 4919 ti->error = "Internal hash not set in inline mode"; 4920 goto bad; 4921 } 4922 if (ic->journal_crypt_alg.alg_string || ic->journal_mac_alg.alg_string) { 4923 r = -EINVAL; 4924 ti->error = "Journal crypt not supported in inline mode"; 4925 goto bad; 4926 } 4927 if (ic->discard) { 4928 r = -EINVAL; 4929 ti->error = "Discards not supported in inline mode"; 4930 goto bad; 4931 } 4932 bi = blk_get_integrity(ic->dev->bdev->bd_disk); 4933 if (!bi || bi->csum_type != BLK_INTEGRITY_CSUM_NONE) { 4934 r = -EINVAL; 4935 ti->error = "Integrity profile not supported"; 4936 goto bad; 4937 } 4938 /*printk("tag_size: %u, metadata_size: %u\n", bi->tag_size, bi->metadata_size);*/ 4939 if (bi->metadata_size < ic->tag_size) { 4940 r = -EINVAL; 4941 ti->error = "The integrity profile is smaller than tag size"; 4942 goto bad; 4943 } 4944 if ((unsigned long)bi->metadata_size > PAGE_SIZE / 2) { 4945 r = -EINVAL; 4946 ti->error = "Too big tuple size"; 4947 goto bad; 4948 } 4949 ic->tuple_size = bi->metadata_size; 4950 if (1 << bi->interval_exp != ic->sectors_per_block << SECTOR_SHIFT) { 4951 r = -EINVAL; 4952 ti->error = "Integrity profile sector size mismatch"; 4953 goto bad; 4954 } 4955 } 4956 4957 if (ic->mode == 'B' && !ic->internal_hash) { 4958 r = -EINVAL; 4959 ti->error = "Bitmap mode can be only used with internal hash"; 4960 goto bad; 4961 } 4962 4963 if (ic->discard && !ic->internal_hash) { 4964 r = -EINVAL; 4965 ti->error = "Discard can be only used with internal hash"; 4966 goto bad; 4967 } 4968 4969 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec); 4970 ic->autocommit_msec = sync_msec; 4971 timer_setup(&ic->autocommit_timer, autocommit_fn, 0); 4972 4973 ic->io = dm_io_client_create(); 4974 if (IS_ERR(ic->io)) { 4975 r = PTR_ERR(ic->io); 4976 ic->io = NULL; 4977 ti->error = "Cannot allocate dm io"; 4978 goto bad; 4979 } 4980 4981 r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache); 4982 if (r) { 4983 ti->error = "Cannot allocate mempool"; 4984 goto bad; 4985 } 4986 4987 r = 
mempool_init_page_pool(&ic->recheck_pool, 1, ic->mode == 'I' ? 1 : 0); 4988 if (r) { 4989 ti->error = "Cannot allocate mempool"; 4990 goto bad; 4991 } 4992 4993 if (ic->mode == 'I') { 4994 r = bioset_init(&ic->recheck_bios, RECHECK_POOL_SIZE, 0, BIOSET_NEED_BVECS); 4995 if (r) { 4996 ti->error = "Cannot allocate bio set"; 4997 goto bad; 4998 } 4999 r = bioset_init(&ic->recalc_bios, 1, 0, BIOSET_NEED_BVECS); 5000 if (r) { 5001 ti->error = "Cannot allocate bio set"; 5002 goto bad; 5003 } 5004 } 5005 5006 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata", 5007 WQ_MEM_RECLAIM | WQ_PERCPU, 5008 METADATA_WORKQUEUE_MAX_ACTIVE); 5009 if (!ic->metadata_wq) { 5010 ti->error = "Cannot allocate workqueue"; 5011 r = -ENOMEM; 5012 goto bad; 5013 } 5014 5015 /* 5016 * If this workqueue weren't ordered, it would cause bio reordering 5017 * and reduced performance. 5018 */ 5019 ic->wait_wq = alloc_ordered_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM); 5020 if (!ic->wait_wq) { 5021 ti->error = "Cannot allocate workqueue"; 5022 r = -ENOMEM; 5023 goto bad; 5024 } 5025 5026 ic->offload_wq = alloc_workqueue("dm-integrity-offload", 5027 WQ_MEM_RECLAIM | WQ_PERCPU, 5028 METADATA_WORKQUEUE_MAX_ACTIVE); 5029 if (!ic->offload_wq) { 5030 ti->error = "Cannot allocate workqueue"; 5031 r = -ENOMEM; 5032 goto bad; 5033 } 5034 5035 ic->commit_wq = alloc_workqueue("dm-integrity-commit", 5036 WQ_MEM_RECLAIM | WQ_PERCPU, 1); 5037 if (!ic->commit_wq) { 5038 ti->error = "Cannot allocate workqueue"; 5039 r = -ENOMEM; 5040 goto bad; 5041 } 5042 INIT_WORK(&ic->commit_work, integrity_commit); 5043 5044 if (ic->mode == 'J' || ic->mode == 'B') { 5045 ic->writer_wq = alloc_workqueue("dm-integrity-writer", 5046 WQ_MEM_RECLAIM | WQ_PERCPU, 1); 5047 if (!ic->writer_wq) { 5048 ti->error = "Cannot allocate workqueue"; 5049 r = -ENOMEM; 5050 goto bad; 5051 } 5052 INIT_WORK(&ic->writer_work, integrity_writer); 5053 } 5054 5055 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL); 5056 if (!ic->sb) { 5057 r = -ENOMEM; 5058 ti->error = "Cannot allocate superblock area"; 5059 goto bad; 5060 } 5061 5062 r = sync_rw_sb(ic, REQ_OP_READ); 5063 if (r) { 5064 ti->error = "Error reading superblock"; 5065 goto bad; 5066 } 5067 should_write_sb = false; 5068 if (memcmp(ic->sb->magic, SB_MAGIC, 8)) { 5069 if (ic->mode != 'R') { 5070 if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) { 5071 r = -EINVAL; 5072 ti->error = "The device is not initialized"; 5073 goto bad; 5074 } 5075 } 5076 5077 r = initialize_superblock(ic, journal_sectors, interleave_sectors); 5078 if (r) { 5079 ti->error = "Could not initialize superblock"; 5080 goto bad; 5081 } 5082 if (ic->mode != 'R') 5083 should_write_sb = true; 5084 } 5085 5086 if (!ic->sb->version || ic->sb->version > SB_VERSION_6) { 5087 r = -EINVAL; 5088 ti->error = "Unknown version"; 5089 goto bad; 5090 } 5091 if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_INLINE)) != (ic->mode == 'I')) { 5092 r = -EINVAL; 5093 ti->error = "Inline flag mismatch"; 5094 goto bad; 5095 } 5096 if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) { 5097 r = -EINVAL; 5098 ti->error = "Tag size doesn't match the information in superblock"; 5099 goto bad; 5100 } 5101 if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) { 5102 r = -EINVAL; 5103 ti->error = "Block size doesn't match the information in superblock"; 5104 goto bad; 5105 } 5106 if (ic->mode != 'I') { 5107 if (!le32_to_cpu(ic->sb->journal_sections)) { 5108 r = -EINVAL; 5109 ti->error = "Corrupted superblock, journal_sections is 0"; 
5110 goto bad; 5111 } 5112 } else { 5113 if (le32_to_cpu(ic->sb->journal_sections)) { 5114 r = -EINVAL; 5115 ti->error = "Corrupted superblock, journal_sections is not 0"; 5116 goto bad; 5117 } 5118 } 5119 /* make sure that ti->max_io_len doesn't overflow */ 5120 if (!ic->meta_dev) { 5121 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS || 5122 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) { 5123 r = -EINVAL; 5124 ti->error = "Invalid interleave_sectors in the superblock"; 5125 goto bad; 5126 } 5127 } else { 5128 if (ic->sb->log2_interleave_sectors) { 5129 r = -EINVAL; 5130 ti->error = "Invalid interleave_sectors in the superblock"; 5131 goto bad; 5132 } 5133 } 5134 if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) { 5135 r = -EINVAL; 5136 ti->error = "Journal mac mismatch"; 5137 goto bad; 5138 } 5139 5140 get_provided_data_sectors(ic); 5141 if (!ic->provided_data_sectors) { 5142 r = -EINVAL; 5143 ti->error = "The device is too small"; 5144 goto bad; 5145 } 5146 5147 try_smaller_buffer: 5148 r = calculate_device_limits(ic); 5149 if (r) { 5150 if (ic->meta_dev) { 5151 if (ic->log2_buffer_sectors > 3) { 5152 ic->log2_buffer_sectors--; 5153 goto try_smaller_buffer; 5154 } 5155 } 5156 ti->error = "The device is too small"; 5157 goto bad; 5158 } 5159 5160 if (log2_sectors_per_bitmap_bit < 0) 5161 log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT); 5162 if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block) 5163 log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block; 5164 5165 bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3); 5166 if (bits_in_journal > UINT_MAX) 5167 bits_in_journal = UINT_MAX; 5168 if (bits_in_journal) 5169 while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit) 5170 log2_sectors_per_bitmap_bit++; 5171 5172 log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block; 5173 ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit; 5174 if (should_write_sb) 5175 ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit; 5176 5177 n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) 5178 + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit; 5179 ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8); 5180 5181 if (!ic->meta_dev) 5182 ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run)); 5183 5184 if (ti->len > ic->provided_data_sectors) { 5185 r = -EINVAL; 5186 ti->error = "Not enough provided sectors for requested mapping size"; 5187 goto bad; 5188 } 5189 5190 threshold = (__u64)ic->journal_entries * (100 - journal_watermark); 5191 threshold += 50; 5192 do_div(threshold, 100); 5193 ic->free_sectors_threshold = threshold; 5194 5195 DEBUG_print("initialized:\n"); 5196 DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size)); 5197 DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size); 5198 DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector); 5199 DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries); 5200 DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors); 5201 DEBUG_print(" journal_sections %u\n", (unsigned int)le32_to_cpu(ic->sb->journal_sections)); 5202 DEBUG_print(" journal_entries %u\n", 
ic->journal_entries); 5203 DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors); 5204 DEBUG_print(" data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev)); 5205 DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors); 5206 DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run); 5207 DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run); 5208 DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors); 5209 DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors); 5210 DEBUG_print(" bits_in_journal %llu\n", bits_in_journal); 5211 5212 if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) { 5213 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); 5214 ic->sb->recalc_sector = cpu_to_le64(0); 5215 } 5216 5217 if (ic->internal_hash) { 5218 ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", 5219 WQ_MEM_RECLAIM | WQ_PERCPU, 1); 5220 if (!ic->recalc_wq) { 5221 ti->error = "Cannot allocate workqueue"; 5222 r = -ENOMEM; 5223 goto bad; 5224 } 5225 INIT_WORK(&ic->recalc_work, ic->mode == 'I' ? integrity_recalc_inline : integrity_recalc); 5226 } else { 5227 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { 5228 ti->error = "Recalculate can only be specified with internal_hash"; 5229 r = -EINVAL; 5230 goto bad; 5231 } 5232 } 5233 5234 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && 5235 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors && 5236 dm_integrity_disable_recalculate(ic)) { 5237 ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\""; 5238 r = -EOPNOTSUPP; 5239 goto bad; 5240 } 5241 5242 ic->bufio = dm_bufio_client_create(ic->meta_dev ? 
	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
			1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0);
	if (IS_ERR(ic->bufio)) {
		r = PTR_ERR(ic->bufio);
		ti->error = "Cannot initialize dm-bufio";
		ic->bufio = NULL;
		goto bad;
	}
	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);

	if (ic->mode != 'R' && ic->mode != 'I') {
		r = create_journal(ic, &ti->error);
		if (r)
			goto bad;

	}

	if (ic->mode == 'B') {
		unsigned int i;
		unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);

		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->recalc_bitmap) {
			ti->error = "Could not allocate memory for bitmap";
			r = -ENOMEM;
			goto bad;
		}
		ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->may_write_bitmap) {
			ti->error = "Could not allocate memory for bitmap";
			r = -ENOMEM;
			goto bad;
		}
		ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
		if (!ic->bbs) {
			ti->error = "Could not allocate memory for bitmap";
			r = -ENOMEM;
			goto bad;
		}
		INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
		for (i = 0; i < ic->n_bitmap_blocks; i++) {
			struct bitmap_block_status *bbs = &ic->bbs[i];
			unsigned int sector, pl_index, pl_offset;

			INIT_WORK(&bbs->work, bitmap_block_work);
			bbs->ic = ic;
			bbs->idx = i;
			bio_list_init(&bbs->bio_queue);
			spin_lock_init(&bbs->bio_queue_lock);

			/* point bbs->bitmap at this block's slice of the in-memory bitmap held in the journal page list */
			sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
			pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
			pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

			bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
		}
	}

	if (should_write_sb) {
		init_journal(ic, 0, ic->journal_sections, 0);
		r = dm_integrity_failed(ic);
		if (unlikely(r)) {
			ti->error = "Error initializing journal";
			goto bad;
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
		if (r) {
			ti->error = "Error initializing superblock";
			goto bad;
		}
		ic->just_formatted = true;
	}

	if (!ic->meta_dev && ic->mode != 'I') {
		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
		if (r)
			goto bad;
	}
	if (ic->mode == 'B') {
		unsigned int max_io_len;

		max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
		if (!max_io_len)
			max_io_len = 1U << 31;
		DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
		if (!ti->max_io_len || ti->max_io_len > max_io_len) {
			r = dm_set_target_max_io_len(ti, max_io_len);
			if (r)
				goto bad;
		}
	}

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	if (ic->discard)
		ti->num_discard_bios = 1;

	if (ic->mode == 'I')
		ti->mempool_needs_integrity = true;

	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
	return 0;

bad:
	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
	dm_integrity_dtr(ti);
	return r;
}

static void dm_integrity_dtr(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
	BUG_ON(!list_empty(&ic->wait_list));

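	/*
	 * Release resources in roughly the reverse order of construction.
	 * This function also runs on the constructor's error path, so every
	 * resource is either tested before being torn down or freed with a
	 * helper that tolerates a NULL/unused object.
	 */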
	if (ic->mode == 'B' && ic->bitmap_flush_work.work.func)
		cancel_delayed_work_sync(&ic->bitmap_flush_work);
	if (ic->metadata_wq)
		destroy_workqueue(ic->metadata_wq);
	if (ic->wait_wq)
		destroy_workqueue(ic->wait_wq);
	if (ic->offload_wq)
		destroy_workqueue(ic->offload_wq);
	if (ic->commit_wq)
		destroy_workqueue(ic->commit_wq);
	if (ic->writer_wq)
		destroy_workqueue(ic->writer_wq);
	if (ic->recalc_wq)
		destroy_workqueue(ic->recalc_wq);
	kvfree(ic->bbs);
	if (ic->bufio)
		dm_bufio_client_destroy(ic->bufio);
	mempool_free(ic->journal_ahash_req, &ic->ahash_req_pool);
	mempool_exit(&ic->ahash_req_pool);
	bioset_exit(&ic->recalc_bios);
	bioset_exit(&ic->recheck_bios);
	mempool_exit(&ic->recheck_pool);
	mempool_exit(&ic->journal_io_mempool);
	if (ic->io)
		dm_io_client_destroy(ic->io);
	if (ic->dev)
		dm_put_device(ti, ic->dev);
	if (ic->meta_dev)
		dm_put_device(ti, ic->meta_dev);
	dm_integrity_free_page_list(ic->journal);
	dm_integrity_free_page_list(ic->journal_io);
	dm_integrity_free_page_list(ic->journal_xor);
	dm_integrity_free_page_list(ic->recalc_bitmap);
	dm_integrity_free_page_list(ic->may_write_bitmap);
	if (ic->journal_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
	if (ic->journal_io_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
	if (ic->sk_requests) {
		unsigned int i;

		for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req;

			req = ic->sk_requests[i];
			if (req) {
				kfree_sensitive(req->iv);
				skcipher_request_free(req);
			}
		}
		kvfree(ic->sk_requests);
	}
	kvfree(ic->journal_tree);
	if (ic->sb)
		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);

	if (ic->internal_shash)
		crypto_free_shash(ic->internal_shash);
	if (ic->internal_ahash)
		crypto_free_ahash(ic->internal_ahash);
	free_alg(&ic->internal_hash_alg);

	if (ic->journal_crypt)
		crypto_free_skcipher(ic->journal_crypt);
	free_alg(&ic->journal_crypt_alg);

	if (ic->journal_mac)
		crypto_free_shash(ic->journal_mac);
	free_alg(&ic->journal_mac_alg);

	kfree(ic);
	dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
}

static struct target_type integrity_target = {
	.name = "integrity",
	.version = {1, 14, 0},
	.module = THIS_MODULE,
	.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
	.ctr = dm_integrity_ctr,
	.dtr = dm_integrity_dtr,
	.map = dm_integrity_map,
	.end_io = dm_integrity_end_io,
	.postsuspend = dm_integrity_postsuspend,
	.resume = dm_integrity_resume,
	.status = dm_integrity_status,
	.iterate_devices = dm_integrity_iterate_devices,
	.io_hints = dm_integrity_io_hints,
};

static int __init dm_integrity_init(void)
{
	int r;

	journal_io_cache = kmem_cache_create("integrity_journal_io",
					     sizeof(struct journal_io), 0, 0, NULL);
	if (!journal_io_cache) {
		DMERR("can't allocate journal io cache");
		return -ENOMEM;
	}

	r = dm_register_target(&integrity_target);
	if (r < 0) {
		kmem_cache_destroy(journal_io_cache);
		return r;
	}

	return 0;
}

static void __exit dm_integrity_exit(void)
{
	dm_unregister_target(&integrity_target);
	kmem_cache_destroy(journal_io_cache);
}

module_init(dm_integrity_init);
module_exit(dm_integrity_exit);

MODULE_AUTHOR("Milan Broz");
MODULE_AUTHOR("Mikulas Patocka");
MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
MODULE_LICENSE("GPL");
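
/*
 * Usage sketch (illustrative only; the device path and argument choice
 * below are hypothetical examples, not mandated by this driver): a
 * standalone dm-integrity target in journal mode with 4-byte crc32c tags
 * can be created from userspace with a table line such as
 *
 *	dmsetup create integ --table \
 *		"0 <provided_data_sectors> integrity /dev/sdb 0 4 J 1 internal_hash:crc32c"
 *
 * where the arguments after "integrity" are the data device, the start
 * sector, the tag size, the mode and the optional-argument list preceded
 * by its count. The mapping length must not exceed the provided data
 * sectors reported by the target (see the check against ti->len in
 * dm_integrity_ctr() above).
 */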