xref: /linux/drivers/md/dm-integrity.c (revision b491e6a7391e3ecdebdd7a097550195cc878924a)
1 /*
2  * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
3  * Copyright (C) 2016-2017 Milan Broz
4  * Copyright (C) 2016-2017 Mikulas Patocka
5  *
6  * This file is released under the GPL.
7  */
8 
9 #include "dm-bio-record.h"
10 
11 #include <linux/compiler.h>
12 #include <linux/module.h>
13 #include <linux/device-mapper.h>
14 #include <linux/dm-io.h>
15 #include <linux/vmalloc.h>
16 #include <linux/sort.h>
17 #include <linux/rbtree.h>
18 #include <linux/delay.h>
19 #include <linux/random.h>
20 #include <linux/reboot.h>
21 #include <crypto/hash.h>
22 #include <crypto/skcipher.h>
23 #include <linux/async_tx.h>
24 #include <linux/dm-bufio.h>
25 
26 #define DM_MSG_PREFIX "integrity"
27 
28 #define DEFAULT_INTERLEAVE_SECTORS	32768
29 #define DEFAULT_JOURNAL_SIZE_FACTOR	7
30 #define DEFAULT_SECTORS_PER_BITMAP_BIT	32768
31 #define DEFAULT_BUFFER_SECTORS		128
32 #define DEFAULT_JOURNAL_WATERMARK	50
33 #define DEFAULT_SYNC_MSEC		10000
34 #define DEFAULT_MAX_JOURNAL_SECTORS	131072
35 #define MIN_LOG2_INTERLEAVE_SECTORS	3
36 #define MAX_LOG2_INTERLEAVE_SECTORS	31
37 #define METADATA_WORKQUEUE_MAX_ACTIVE	16
38 #define RECALC_SECTORS			8192
39 #define RECALC_WRITE_SUPER		16
40 #define BITMAP_BLOCK_SIZE		4096	/* don't change it */
41 #define BITMAP_FLUSH_INTERVAL		(10 * HZ)
42 #define DISCARD_FILLER			0xf6
43 
44 /*
45  * Warning - DEBUG_PRINT prints security-sensitive data to the log,
46  * so it should not be enabled in the official kernel
47  */
48 //#define DEBUG_PRINT
49 //#define INTERNAL_VERIFY
50 
51 /*
52  * On disk structures
53  */
54 
55 #define SB_MAGIC			"integrt"
56 #define SB_VERSION_1			1
57 #define SB_VERSION_2			2
58 #define SB_VERSION_3			3
59 #define SB_VERSION_4			4
60 #define SB_SECTORS			8
61 #define MAX_SECTORS_PER_BLOCK		8
62 
63 struct superblock {
64 	__u8 magic[8];
65 	__u8 version;
66 	__u8 log2_interleave_sectors;
67 	__u16 integrity_tag_size;
68 	__u32 journal_sections;
69 	__u64 provided_data_sectors;	/* userspace uses this value */
70 	__u32 flags;
71 	__u8 log2_sectors_per_block;
72 	__u8 log2_blocks_per_bitmap_bit;
73 	__u8 pad[2];
74 	__u64 recalc_sector;
75 };
76 
77 #define SB_FLAG_HAVE_JOURNAL_MAC	0x1
78 #define SB_FLAG_RECALCULATING		0x2
79 #define SB_FLAG_DIRTY_BITMAP		0x4
80 #define SB_FLAG_FIXED_PADDING		0x8
81 
82 #define	JOURNAL_ENTRY_ROUNDUP		8
83 
84 typedef __u64 commit_id_t;
85 #define JOURNAL_MAC_PER_SECTOR		8
86 
87 struct journal_entry {
88 	union {
89 		struct {
90 			__u32 sector_lo;
91 			__u32 sector_hi;
92 		} s;
93 		__u64 sector;
94 	} u;
95 	commit_id_t last_bytes[];
96 	/* __u8 tag[0]; */
97 };
98 
99 #define journal_entry_tag(ic, je)		((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])
100 
101 #if BITS_PER_LONG == 64
102 #define journal_entry_set_sector(je, x)		do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
103 #else
104 #define journal_entry_set_sector(je, x)		do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
105 #endif
106 #define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
107 #define journal_entry_is_unused(je)		((je)->u.s.sector_hi == cpu_to_le32(-1))
108 #define journal_entry_set_unused(je)		do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
109 #define journal_entry_is_inprogress(je)		((je)->u.s.sector_hi == cpu_to_le32(-2))
110 #define journal_entry_set_inprogress(je)	do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
111 
112 #define JOURNAL_BLOCK_SECTORS		8
113 #define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
114 #define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)
115 
116 struct journal_sector {
117 	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
118 	__u8 mac[JOURNAL_MAC_PER_SECTOR];
119 	commit_id_t commit_id;
120 };
121 
122 #define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))
123 
124 #define METADATA_PADDING_SECTORS	8
125 
126 #define N_COMMIT_IDS			4
127 
128 static unsigned char prev_commit_seq(unsigned char seq)
129 {
130 	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
131 }
132 
133 static unsigned char next_commit_seq(unsigned char seq)
134 {
135 	return (seq + 1) % N_COMMIT_IDS;
136 }
137 
138 /*
139  * In-memory structures
140  */
141 
142 struct journal_node {
143 	struct rb_node node;
144 	sector_t sector;
145 };
146 
147 struct alg_spec {
148 	char *alg_string;
149 	char *key_string;
150 	__u8 *key;
151 	unsigned key_size;
152 };
153 
154 struct dm_integrity_c {
155 	struct dm_dev *dev;
156 	struct dm_dev *meta_dev;
157 	unsigned tag_size;
158 	__s8 log2_tag_size;
159 	sector_t start;
160 	mempool_t journal_io_mempool;
161 	struct dm_io_client *io;
162 	struct dm_bufio_client *bufio;
163 	struct workqueue_struct *metadata_wq;
164 	struct superblock *sb;
165 	unsigned journal_pages;
166 	unsigned n_bitmap_blocks;
167 
168 	struct page_list *journal;
169 	struct page_list *journal_io;
170 	struct page_list *journal_xor;
171 	struct page_list *recalc_bitmap;
172 	struct page_list *may_write_bitmap;
173 	struct bitmap_block_status *bbs;
174 	unsigned bitmap_flush_interval;
175 	int synchronous_mode;
176 	struct bio_list synchronous_bios;
177 	struct delayed_work bitmap_flush_work;
178 
179 	struct crypto_skcipher *journal_crypt;
180 	struct scatterlist **journal_scatterlist;
181 	struct scatterlist **journal_io_scatterlist;
182 	struct skcipher_request **sk_requests;
183 
184 	struct crypto_shash *journal_mac;
185 
186 	struct journal_node *journal_tree;
187 	struct rb_root journal_tree_root;
188 
189 	sector_t provided_data_sectors;
190 
191 	unsigned short journal_entry_size;
192 	unsigned char journal_entries_per_sector;
193 	unsigned char journal_section_entries;
194 	unsigned short journal_section_sectors;
195 	unsigned journal_sections;
196 	unsigned journal_entries;
197 	sector_t data_device_sectors;
198 	sector_t meta_device_sectors;
199 	unsigned initial_sectors;
200 	unsigned metadata_run;
201 	__s8 log2_metadata_run;
202 	__u8 log2_buffer_sectors;
203 	__u8 sectors_per_block;
204 	__u8 log2_blocks_per_bitmap_bit;
205 
206 	unsigned char mode;
207 
208 	int failed;
209 
210 	struct crypto_shash *internal_hash;
211 
212 	struct dm_target *ti;
213 
214 	/* these variables are locked with endio_wait.lock */
215 	struct rb_root in_progress;
216 	struct list_head wait_list;
217 	wait_queue_head_t endio_wait;
218 	struct workqueue_struct *wait_wq;
219 	struct workqueue_struct *offload_wq;
220 
221 	unsigned char commit_seq;
222 	commit_id_t commit_ids[N_COMMIT_IDS];
223 
224 	unsigned committed_section;
225 	unsigned n_committed_sections;
226 
227 	unsigned uncommitted_section;
228 	unsigned n_uncommitted_sections;
229 
230 	unsigned free_section;
231 	unsigned char free_section_entry;
232 	unsigned free_sectors;
233 
234 	unsigned free_sectors_threshold;
235 
236 	struct workqueue_struct *commit_wq;
237 	struct work_struct commit_work;
238 
239 	struct workqueue_struct *writer_wq;
240 	struct work_struct writer_work;
241 
242 	struct workqueue_struct *recalc_wq;
243 	struct work_struct recalc_work;
244 	u8 *recalc_buffer;
245 	u8 *recalc_tags;
246 
247 	struct bio_list flush_bio_list;
248 
249 	unsigned long autocommit_jiffies;
250 	struct timer_list autocommit_timer;
251 	unsigned autocommit_msec;
252 
253 	wait_queue_head_t copy_to_journal_wait;
254 
255 	struct completion crypto_backoff;
256 
257 	bool journal_uptodate;
258 	bool just_formatted;
259 	bool recalculate_flag;
260 	bool fix_padding;
261 	bool discard;
262 
263 	struct alg_spec internal_hash_alg;
264 	struct alg_spec journal_crypt_alg;
265 	struct alg_spec journal_mac_alg;
266 
267 	atomic64_t number_of_mismatches;
268 
269 	struct notifier_block reboot_notifier;
270 };
271 
272 struct dm_integrity_range {
273 	sector_t logical_sector;
274 	sector_t n_sectors;
275 	bool waiting;
276 	union {
277 		struct rb_node node;
278 		struct {
279 			struct task_struct *task;
280 			struct list_head wait_entry;
281 		};
282 	};
283 };
284 
285 struct dm_integrity_io {
286 	struct work_struct work;
287 
288 	struct dm_integrity_c *ic;
289 	enum req_opf op;
290 	bool fua;
291 
292 	struct dm_integrity_range range;
293 
294 	sector_t metadata_block;
295 	unsigned metadata_offset;
296 
297 	atomic_t in_flight;
298 	blk_status_t bi_status;
299 
300 	struct completion *completion;
301 
302 	struct dm_bio_details bio_details;
303 };
304 
305 struct journal_completion {
306 	struct dm_integrity_c *ic;
307 	atomic_t in_flight;
308 	struct completion comp;
309 };
310 
311 struct journal_io {
312 	struct dm_integrity_range range;
313 	struct journal_completion *comp;
314 };
315 
316 struct bitmap_block_status {
317 	struct work_struct work;
318 	struct dm_integrity_c *ic;
319 	unsigned idx;
320 	unsigned long *bitmap;
321 	struct bio_list bio_queue;
322 	spinlock_t bio_queue_lock;
323 
324 };
325 
326 static struct kmem_cache *journal_io_cache;
327 
328 #define JOURNAL_IO_MEMPOOL	32
329 
330 #ifdef DEBUG_PRINT
331 #define DEBUG_print(x, ...)	printk(KERN_DEBUG x, ##__VA_ARGS__)
332 static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
333 {
334 	va_list args;
335 	va_start(args, msg);
336 	vprintk(msg, args);
337 	va_end(args);
338 	if (len)
339 		pr_cont(":");
340 	while (len) {
341 		pr_cont(" %02x", *bytes);
342 		bytes++;
343 		len--;
344 	}
345 	pr_cont("\n");
346 }
347 #define DEBUG_bytes(bytes, len, msg, ...)	__DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
348 #else
349 #define DEBUG_print(x, ...)			do { } while (0)
350 #define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
351 #endif
352 
353 static void dm_integrity_prepare(struct request *rq)
354 {
355 }
356 
357 static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
358 {
359 }
360 
361 /*
362  * DM Integrity profile, protection is performed layer above (dm-crypt)
363  */
364 static const struct blk_integrity_profile dm_integrity_profile = {
365 	.name			= "DM-DIF-EXT-TAG",
366 	.generate_fn		= NULL,
367 	.verify_fn		= NULL,
368 	.prepare_fn		= dm_integrity_prepare,
369 	.complete_fn		= dm_integrity_complete,
370 };
371 
372 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
373 static void integrity_bio_wait(struct work_struct *w);
374 static void dm_integrity_dtr(struct dm_target *ti);
375 
376 static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
377 {
378 	if (err == -EILSEQ)
379 		atomic64_inc(&ic->number_of_mismatches);
380 	if (!cmpxchg(&ic->failed, 0, err))
381 		DMERR("Error on %s: %d", msg, err);
382 }
383 
384 static int dm_integrity_failed(struct dm_integrity_c *ic)
385 {
386 	return READ_ONCE(ic->failed);
387 }
388 
389 static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
390 					  unsigned j, unsigned char seq)
391 {
392 	/*
393 	 * Xor the number with section and sector, so that if a piece of
394 	 * journal is written at wrong place, it is detected.
395 	 */
396 	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
397 }
398 
399 static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
400 				sector_t *area, sector_t *offset)
401 {
402 	if (!ic->meta_dev) {
403 		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
404 		*area = data_sector >> log2_interleave_sectors;
405 		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
406 	} else {
407 		*area = 0;
408 		*offset = data_sector;
409 	}
410 }
411 
412 #define sector_to_block(ic, n)						\
413 do {									\
414 	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
415 	(n) >>= (ic)->sb->log2_sectors_per_block;			\
416 } while (0)
417 
418 static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
419 					    sector_t offset, unsigned *metadata_offset)
420 {
421 	__u64 ms;
422 	unsigned mo;
423 
424 	ms = area << ic->sb->log2_interleave_sectors;
425 	if (likely(ic->log2_metadata_run >= 0))
426 		ms += area << ic->log2_metadata_run;
427 	else
428 		ms += area * ic->metadata_run;
429 	ms >>= ic->log2_buffer_sectors;
430 
431 	sector_to_block(ic, offset);
432 
433 	if (likely(ic->log2_tag_size >= 0)) {
434 		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
435 		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
436 	} else {
437 		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
438 		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
439 	}
440 	*metadata_offset = mo;
441 	return ms;
442 }
443 
444 static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
445 {
446 	sector_t result;
447 
448 	if (ic->meta_dev)
449 		return offset;
450 
451 	result = area << ic->sb->log2_interleave_sectors;
452 	if (likely(ic->log2_metadata_run >= 0))
453 		result += (area + 1) << ic->log2_metadata_run;
454 	else
455 		result += (area + 1) * ic->metadata_run;
456 
457 	result += (sector_t)ic->initial_sectors + offset;
458 	result += ic->start;
459 
460 	return result;
461 }
462 
463 static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
464 {
465 	if (unlikely(*sec_ptr >= ic->journal_sections))
466 		*sec_ptr -= ic->journal_sections;
467 }
468 
469 static void sb_set_version(struct dm_integrity_c *ic)
470 {
471 	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
472 		ic->sb->version = SB_VERSION_4;
473 	else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
474 		ic->sb->version = SB_VERSION_3;
475 	else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
476 		ic->sb->version = SB_VERSION_2;
477 	else
478 		ic->sb->version = SB_VERSION_1;
479 }
480 
481 static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
482 {
483 	struct dm_io_request io_req;
484 	struct dm_io_region io_loc;
485 
486 	io_req.bi_op = op;
487 	io_req.bi_op_flags = op_flags;
488 	io_req.mem.type = DM_IO_KMEM;
489 	io_req.mem.ptr.addr = ic->sb;
490 	io_req.notify.fn = NULL;
491 	io_req.client = ic->io;
492 	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
493 	io_loc.sector = ic->start;
494 	io_loc.count = SB_SECTORS;
495 
496 	if (op == REQ_OP_WRITE)
497 		sb_set_version(ic);
498 
499 	return dm_io(&io_req, 1, &io_loc, NULL);
500 }
501 
502 #define BITMAP_OP_TEST_ALL_SET		0
503 #define BITMAP_OP_TEST_ALL_CLEAR	1
504 #define BITMAP_OP_SET			2
505 #define BITMAP_OP_CLEAR			3
506 
507 static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
508 			    sector_t sector, sector_t n_sectors, int mode)
509 {
510 	unsigned long bit, end_bit, this_end_bit, page, end_page;
511 	unsigned long *data;
512 
513 	if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
514 		DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
515 			sector,
516 			n_sectors,
517 			ic->sb->log2_sectors_per_block,
518 			ic->log2_blocks_per_bitmap_bit,
519 			mode);
520 		BUG();
521 	}
522 
523 	if (unlikely(!n_sectors))
524 		return true;
525 
526 	bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
527 	end_bit = (sector + n_sectors - 1) >>
528 		(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
529 
530 	page = bit / (PAGE_SIZE * 8);
531 	bit %= PAGE_SIZE * 8;
532 
533 	end_page = end_bit / (PAGE_SIZE * 8);
534 	end_bit %= PAGE_SIZE * 8;
535 
536 repeat:
537 	if (page < end_page) {
538 		this_end_bit = PAGE_SIZE * 8 - 1;
539 	} else {
540 		this_end_bit = end_bit;
541 	}
542 
543 	data = lowmem_page_address(bitmap[page].page);
544 
545 	if (mode == BITMAP_OP_TEST_ALL_SET) {
546 		while (bit <= this_end_bit) {
547 			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
548 				do {
549 					if (data[bit / BITS_PER_LONG] != -1)
550 						return false;
551 					bit += BITS_PER_LONG;
552 				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
553 				continue;
554 			}
555 			if (!test_bit(bit, data))
556 				return false;
557 			bit++;
558 		}
559 	} else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
560 		while (bit <= this_end_bit) {
561 			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
562 				do {
563 					if (data[bit / BITS_PER_LONG] != 0)
564 						return false;
565 					bit += BITS_PER_LONG;
566 				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
567 				continue;
568 			}
569 			if (test_bit(bit, data))
570 				return false;
571 			bit++;
572 		}
573 	} else if (mode == BITMAP_OP_SET) {
574 		while (bit <= this_end_bit) {
575 			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
576 				do {
577 					data[bit / BITS_PER_LONG] = -1;
578 					bit += BITS_PER_LONG;
579 				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
580 				continue;
581 			}
582 			__set_bit(bit, data);
583 			bit++;
584 		}
585 	} else if (mode == BITMAP_OP_CLEAR) {
586 		if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
587 			clear_page(data);
588 		else while (bit <= this_end_bit) {
589 			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
590 				do {
591 					data[bit / BITS_PER_LONG] = 0;
592 					bit += BITS_PER_LONG;
593 				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
594 				continue;
595 			}
596 			__clear_bit(bit, data);
597 			bit++;
598 		}
599 	} else {
600 		BUG();
601 	}
602 
603 	if (unlikely(page < end_page)) {
604 		bit = 0;
605 		page++;
606 		goto repeat;
607 	}
608 
609 	return true;
610 }
611 
612 static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
613 {
614 	unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
615 	unsigned i;
616 
617 	for (i = 0; i < n_bitmap_pages; i++) {
618 		unsigned long *dst_data = lowmem_page_address(dst[i].page);
619 		unsigned long *src_data = lowmem_page_address(src[i].page);
620 		copy_page(dst_data, src_data);
621 	}
622 }
623 
624 static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
625 {
626 	unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
627 	unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);
628 
629 	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
630 	return &ic->bbs[bitmap_block];
631 }
632 
633 static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
634 				 bool e, const char *function)
635 {
636 #if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
637 	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
638 
639 	if (unlikely(section >= ic->journal_sections) ||
640 	    unlikely(offset >= limit)) {
641 		DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
642 		       function, section, offset, ic->journal_sections, limit);
643 		BUG();
644 	}
645 #endif
646 }
647 
648 static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
649 			       unsigned *pl_index, unsigned *pl_offset)
650 {
651 	unsigned sector;
652 
653 	access_journal_check(ic, section, offset, false, "page_list_location");
654 
655 	sector = section * ic->journal_section_sectors + offset;
656 
657 	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
658 	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
659 }
660 
661 static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
662 					       unsigned section, unsigned offset, unsigned *n_sectors)
663 {
664 	unsigned pl_index, pl_offset;
665 	char *va;
666 
667 	page_list_location(ic, section, offset, &pl_index, &pl_offset);
668 
669 	if (n_sectors)
670 		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;
671 
672 	va = lowmem_page_address(pl[pl_index].page);
673 
674 	return (struct journal_sector *)(va + pl_offset);
675 }
676 
677 static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
678 {
679 	return access_page_list(ic, ic->journal, section, offset, NULL);
680 }
681 
682 static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
683 {
684 	unsigned rel_sector, offset;
685 	struct journal_sector *js;
686 
687 	access_journal_check(ic, section, n, true, "access_journal_entry");
688 
689 	rel_sector = n % JOURNAL_BLOCK_SECTORS;
690 	offset = n / JOURNAL_BLOCK_SECTORS;
691 
692 	js = access_journal(ic, section, rel_sector);
693 	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
694 }
695 
696 static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
697 {
698 	n <<= ic->sb->log2_sectors_per_block;
699 
700 	n += JOURNAL_BLOCK_SECTORS;
701 
702 	access_journal_check(ic, section, n, false, "access_journal_data");
703 
704 	return access_journal(ic, section, n);
705 }
706 
707 static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
708 {
709 	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
710 	int r;
711 	unsigned j, size;
712 
713 	desc->tfm = ic->journal_mac;
714 
715 	r = crypto_shash_init(desc);
716 	if (unlikely(r)) {
717 		dm_integrity_io_error(ic, "crypto_shash_init", r);
718 		goto err;
719 	}
720 
721 	for (j = 0; j < ic->journal_section_entries; j++) {
722 		struct journal_entry *je = access_journal_entry(ic, section, j);
723 		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
724 		if (unlikely(r)) {
725 			dm_integrity_io_error(ic, "crypto_shash_update", r);
726 			goto err;
727 		}
728 	}
729 
730 	size = crypto_shash_digestsize(ic->journal_mac);
731 
732 	if (likely(size <= JOURNAL_MAC_SIZE)) {
733 		r = crypto_shash_final(desc, result);
734 		if (unlikely(r)) {
735 			dm_integrity_io_error(ic, "crypto_shash_final", r);
736 			goto err;
737 		}
738 		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
739 	} else {
740 		__u8 digest[HASH_MAX_DIGESTSIZE];
741 
742 		if (WARN_ON(size > sizeof(digest))) {
743 			dm_integrity_io_error(ic, "digest_size", -EINVAL);
744 			goto err;
745 		}
746 		r = crypto_shash_final(desc, digest);
747 		if (unlikely(r)) {
748 			dm_integrity_io_error(ic, "crypto_shash_final", r);
749 			goto err;
750 		}
751 		memcpy(result, digest, JOURNAL_MAC_SIZE);
752 	}
753 
754 	return;
755 err:
756 	memset(result, 0, JOURNAL_MAC_SIZE);
757 }
758 
759 static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
760 {
761 	__u8 result[JOURNAL_MAC_SIZE];
762 	unsigned j;
763 
764 	if (!ic->journal_mac)
765 		return;
766 
767 	section_mac(ic, section, result);
768 
769 	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
770 		struct journal_sector *js = access_journal(ic, section, j);
771 
772 		if (likely(wr))
773 			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
774 		else {
775 			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
776 				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
777 		}
778 	}
779 }
780 
781 static void complete_journal_op(void *context)
782 {
783 	struct journal_completion *comp = context;
784 	BUG_ON(!atomic_read(&comp->in_flight));
785 	if (likely(atomic_dec_and_test(&comp->in_flight)))
786 		complete(&comp->comp);
787 }
788 
789 static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
790 			unsigned n_sections, struct journal_completion *comp)
791 {
792 	struct async_submit_ctl submit;
793 	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
794 	unsigned pl_index, pl_offset, section_index;
795 	struct page_list *source_pl, *target_pl;
796 
797 	if (likely(encrypt)) {
798 		source_pl = ic->journal;
799 		target_pl = ic->journal_io;
800 	} else {
801 		source_pl = ic->journal_io;
802 		target_pl = ic->journal;
803 	}
804 
805 	page_list_location(ic, section, 0, &pl_index, &pl_offset);
806 
807 	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);
808 
809 	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);
810 
811 	section_index = pl_index;
812 
813 	do {
814 		size_t this_step;
815 		struct page *src_pages[2];
816 		struct page *dst_page;
817 
818 		while (unlikely(pl_index == section_index)) {
819 			unsigned dummy;
820 			if (likely(encrypt))
821 				rw_section_mac(ic, section, true);
822 			section++;
823 			n_sections--;
824 			if (!n_sections)
825 				break;
826 			page_list_location(ic, section, 0, &section_index, &dummy);
827 		}
828 
829 		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
830 		dst_page = target_pl[pl_index].page;
831 		src_pages[0] = source_pl[pl_index].page;
832 		src_pages[1] = ic->journal_xor[pl_index].page;
833 
834 		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);
835 
836 		pl_index++;
837 		pl_offset = 0;
838 		n_bytes -= this_step;
839 	} while (n_bytes);
840 
841 	BUG_ON(n_sections);
842 
843 	async_tx_issue_pending_all();
844 }
845 
846 static void complete_journal_encrypt(struct crypto_async_request *req, int err)
847 {
848 	struct journal_completion *comp = req->data;
849 	if (unlikely(err)) {
850 		if (likely(err == -EINPROGRESS)) {
851 			complete(&comp->ic->crypto_backoff);
852 			return;
853 		}
854 		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
855 	}
856 	complete_journal_op(comp);
857 }
858 
859 static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
860 {
861 	int r;
862 	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
863 				      complete_journal_encrypt, comp);
864 	if (likely(encrypt))
865 		r = crypto_skcipher_encrypt(req);
866 	else
867 		r = crypto_skcipher_decrypt(req);
868 	if (likely(!r))
869 		return false;
870 	if (likely(r == -EINPROGRESS))
871 		return true;
872 	if (likely(r == -EBUSY)) {
873 		wait_for_completion(&comp->ic->crypto_backoff);
874 		reinit_completion(&comp->ic->crypto_backoff);
875 		return true;
876 	}
877 	dm_integrity_io_error(comp->ic, "encrypt", r);
878 	return false;
879 }
880 
881 static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
882 			  unsigned n_sections, struct journal_completion *comp)
883 {
884 	struct scatterlist **source_sg;
885 	struct scatterlist **target_sg;
886 
887 	atomic_add(2, &comp->in_flight);
888 
889 	if (likely(encrypt)) {
890 		source_sg = ic->journal_scatterlist;
891 		target_sg = ic->journal_io_scatterlist;
892 	} else {
893 		source_sg = ic->journal_io_scatterlist;
894 		target_sg = ic->journal_scatterlist;
895 	}
896 
897 	do {
898 		struct skcipher_request *req;
899 		unsigned ivsize;
900 		char *iv;
901 
902 		if (likely(encrypt))
903 			rw_section_mac(ic, section, true);
904 
905 		req = ic->sk_requests[section];
906 		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
907 		iv = req->iv;
908 
909 		memcpy(iv, iv + ivsize, ivsize);
910 
911 		req->src = source_sg[section];
912 		req->dst = target_sg[section];
913 
914 		if (unlikely(do_crypt(encrypt, req, comp)))
915 			atomic_inc(&comp->in_flight);
916 
917 		section++;
918 		n_sections--;
919 	} while (n_sections);
920 
921 	atomic_dec(&comp->in_flight);
922 	complete_journal_op(comp);
923 }
924 
925 static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
926 			    unsigned n_sections, struct journal_completion *comp)
927 {
928 	if (ic->journal_xor)
929 		return xor_journal(ic, encrypt, section, n_sections, comp);
930 	else
931 		return crypt_journal(ic, encrypt, section, n_sections, comp);
932 }
933 
934 static void complete_journal_io(unsigned long error, void *context)
935 {
936 	struct journal_completion *comp = context;
937 	if (unlikely(error != 0))
938 		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
939 	complete_journal_op(comp);
940 }
941 
942 static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
943 			       unsigned sector, unsigned n_sectors, struct journal_completion *comp)
944 {
945 	struct dm_io_request io_req;
946 	struct dm_io_region io_loc;
947 	unsigned pl_index, pl_offset;
948 	int r;
949 
950 	if (unlikely(dm_integrity_failed(ic))) {
951 		if (comp)
952 			complete_journal_io(-1UL, comp);
953 		return;
954 	}
955 
956 	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
957 	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
958 
959 	io_req.bi_op = op;
960 	io_req.bi_op_flags = op_flags;
961 	io_req.mem.type = DM_IO_PAGE_LIST;
962 	if (ic->journal_io)
963 		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
964 	else
965 		io_req.mem.ptr.pl = &ic->journal[pl_index];
966 	io_req.mem.offset = pl_offset;
967 	if (likely(comp != NULL)) {
968 		io_req.notify.fn = complete_journal_io;
969 		io_req.notify.context = comp;
970 	} else {
971 		io_req.notify.fn = NULL;
972 	}
973 	io_req.client = ic->io;
974 	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
975 	io_loc.sector = ic->start + SB_SECTORS + sector;
976 	io_loc.count = n_sectors;
977 
978 	r = dm_io(&io_req, 1, &io_loc, NULL);
979 	if (unlikely(r)) {
980 		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
981 		if (comp) {
982 			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
983 			complete_journal_io(-1UL, comp);
984 		}
985 	}
986 }
987 
988 static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
989 		       unsigned n_sections, struct journal_completion *comp)
990 {
991 	unsigned sector, n_sectors;
992 
993 	sector = section * ic->journal_section_sectors;
994 	n_sectors = n_sections * ic->journal_section_sectors;
995 
996 	rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
997 }
998 
999 static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
1000 {
1001 	struct journal_completion io_comp;
1002 	struct journal_completion crypt_comp_1;
1003 	struct journal_completion crypt_comp_2;
1004 	unsigned i;
1005 
1006 	io_comp.ic = ic;
1007 	init_completion(&io_comp.comp);
1008 
1009 	if (commit_start + commit_sections <= ic->journal_sections) {
1010 		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
1011 		if (ic->journal_io) {
1012 			crypt_comp_1.ic = ic;
1013 			init_completion(&crypt_comp_1.comp);
1014 			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
1015 			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
1016 			wait_for_completion_io(&crypt_comp_1.comp);
1017 		} else {
1018 			for (i = 0; i < commit_sections; i++)
1019 				rw_section_mac(ic, commit_start + i, true);
1020 		}
1021 		rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
1022 			   commit_sections, &io_comp);
1023 	} else {
1024 		unsigned to_end;
1025 		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
1026 		to_end = ic->journal_sections - commit_start;
1027 		if (ic->journal_io) {
1028 			crypt_comp_1.ic = ic;
1029 			init_completion(&crypt_comp_1.comp);
1030 			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
1031 			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
1032 			if (try_wait_for_completion(&crypt_comp_1.comp)) {
1033 				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
1034 				reinit_completion(&crypt_comp_1.comp);
1035 				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
1036 				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
1037 				wait_for_completion_io(&crypt_comp_1.comp);
1038 			} else {
1039 				crypt_comp_2.ic = ic;
1040 				init_completion(&crypt_comp_2.comp);
1041 				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
1042 				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
1043 				wait_for_completion_io(&crypt_comp_1.comp);
1044 				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
1045 				wait_for_completion_io(&crypt_comp_2.comp);
1046 			}
1047 		} else {
1048 			for (i = 0; i < to_end; i++)
1049 				rw_section_mac(ic, commit_start + i, true);
1050 			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
1051 			for (i = 0; i < commit_sections - to_end; i++)
1052 				rw_section_mac(ic, i, true);
1053 		}
1054 		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
1055 	}
1056 
1057 	wait_for_completion_io(&io_comp.comp);
1058 }
1059 
1060 static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
1061 			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
1062 {
1063 	struct dm_io_request io_req;
1064 	struct dm_io_region io_loc;
1065 	int r;
1066 	unsigned sector, pl_index, pl_offset;
1067 
1068 	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));
1069 
1070 	if (unlikely(dm_integrity_failed(ic))) {
1071 		fn(-1UL, data);
1072 		return;
1073 	}
1074 
1075 	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;
1076 
1077 	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
1078 	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
1079 
1080 	io_req.bi_op = REQ_OP_WRITE;
1081 	io_req.bi_op_flags = 0;
1082 	io_req.mem.type = DM_IO_PAGE_LIST;
1083 	io_req.mem.ptr.pl = &ic->journal[pl_index];
1084 	io_req.mem.offset = pl_offset;
1085 	io_req.notify.fn = fn;
1086 	io_req.notify.context = data;
1087 	io_req.client = ic->io;
1088 	io_loc.bdev = ic->dev->bdev;
1089 	io_loc.sector = target;
1090 	io_loc.count = n_sectors;
1091 
1092 	r = dm_io(&io_req, 1, &io_loc, NULL);
1093 	if (unlikely(r)) {
1094 		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
1095 		fn(-1UL, data);
1096 	}
1097 }
1098 
1099 static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
1100 {
1101 	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
1102 	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
1103 }
1104 
1105 static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
1106 {
1107 	struct rb_node **n = &ic->in_progress.rb_node;
1108 	struct rb_node *parent;
1109 
1110 	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));
1111 
1112 	if (likely(check_waiting)) {
1113 		struct dm_integrity_range *range;
1114 		list_for_each_entry(range, &ic->wait_list, wait_entry) {
1115 			if (unlikely(ranges_overlap(range, new_range)))
1116 				return false;
1117 		}
1118 	}
1119 
1120 	parent = NULL;
1121 
1122 	while (*n) {
1123 		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);
1124 
1125 		parent = *n;
1126 		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
1127 			n = &range->node.rb_left;
1128 		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
1129 			n = &range->node.rb_right;
1130 		} else {
1131 			return false;
1132 		}
1133 	}
1134 
1135 	rb_link_node(&new_range->node, parent, n);
1136 	rb_insert_color(&new_range->node, &ic->in_progress);
1137 
1138 	return true;
1139 }
1140 
1141 static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
1142 {
1143 	rb_erase(&range->node, &ic->in_progress);
1144 	while (unlikely(!list_empty(&ic->wait_list))) {
1145 		struct dm_integrity_range *last_range =
1146 			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
1147 		struct task_struct *last_range_task;
1148 		last_range_task = last_range->task;
1149 		list_del(&last_range->wait_entry);
1150 		if (!add_new_range(ic, last_range, false)) {
1151 			last_range->task = last_range_task;
1152 			list_add(&last_range->wait_entry, &ic->wait_list);
1153 			break;
1154 		}
1155 		last_range->waiting = false;
1156 		wake_up_process(last_range_task);
1157 	}
1158 }
1159 
1160 static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
1161 {
1162 	unsigned long flags;
1163 
1164 	spin_lock_irqsave(&ic->endio_wait.lock, flags);
1165 	remove_range_unlocked(ic, range);
1166 	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1167 }
1168 
1169 static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
1170 {
1171 	new_range->waiting = true;
1172 	list_add_tail(&new_range->wait_entry, &ic->wait_list);
1173 	new_range->task = current;
1174 	do {
1175 		__set_current_state(TASK_UNINTERRUPTIBLE);
1176 		spin_unlock_irq(&ic->endio_wait.lock);
1177 		io_schedule();
1178 		spin_lock_irq(&ic->endio_wait.lock);
1179 	} while (unlikely(new_range->waiting));
1180 }
1181 
1182 static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
1183 {
1184 	if (unlikely(!add_new_range(ic, new_range, true)))
1185 		wait_and_add_new_range(ic, new_range);
1186 }
1187 
1188 static void init_journal_node(struct journal_node *node)
1189 {
1190 	RB_CLEAR_NODE(&node->node);
1191 	node->sector = (sector_t)-1;
1192 }
1193 
1194 static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
1195 {
1196 	struct rb_node **link;
1197 	struct rb_node *parent;
1198 
1199 	node->sector = sector;
1200 	BUG_ON(!RB_EMPTY_NODE(&node->node));
1201 
1202 	link = &ic->journal_tree_root.rb_node;
1203 	parent = NULL;
1204 
1205 	while (*link) {
1206 		struct journal_node *j;
1207 		parent = *link;
1208 		j = container_of(parent, struct journal_node, node);
1209 		if (sector < j->sector)
1210 			link = &j->node.rb_left;
1211 		else
1212 			link = &j->node.rb_right;
1213 	}
1214 
1215 	rb_link_node(&node->node, parent, link);
1216 	rb_insert_color(&node->node, &ic->journal_tree_root);
1217 }
1218 
1219 static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
1220 {
1221 	BUG_ON(RB_EMPTY_NODE(&node->node));
1222 	rb_erase(&node->node, &ic->journal_tree_root);
1223 	init_journal_node(node);
1224 }
1225 
1226 #define NOT_FOUND	(-1U)
1227 
1228 static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
1229 {
1230 	struct rb_node *n = ic->journal_tree_root.rb_node;
1231 	unsigned found = NOT_FOUND;
1232 	*next_sector = (sector_t)-1;
1233 	while (n) {
1234 		struct journal_node *j = container_of(n, struct journal_node, node);
1235 		if (sector == j->sector) {
1236 			found = j - ic->journal_tree;
1237 		}
1238 		if (sector < j->sector) {
1239 			*next_sector = j->sector;
1240 			n = j->node.rb_left;
1241 		} else {
1242 			n = j->node.rb_right;
1243 		}
1244 	}
1245 
1246 	return found;
1247 }
1248 
1249 static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
1250 {
1251 	struct journal_node *node, *next_node;
1252 	struct rb_node *next;
1253 
1254 	if (unlikely(pos >= ic->journal_entries))
1255 		return false;
1256 	node = &ic->journal_tree[pos];
1257 	if (unlikely(RB_EMPTY_NODE(&node->node)))
1258 		return false;
1259 	if (unlikely(node->sector != sector))
1260 		return false;
1261 
1262 	next = rb_next(&node->node);
1263 	if (unlikely(!next))
1264 		return true;
1265 
1266 	next_node = container_of(next, struct journal_node, node);
1267 	return next_node->sector != sector;
1268 }
1269 
1270 static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
1271 {
1272 	struct rb_node *next;
1273 	struct journal_node *next_node;
1274 	unsigned next_section;
1275 
1276 	BUG_ON(RB_EMPTY_NODE(&node->node));
1277 
1278 	next = rb_next(&node->node);
1279 	if (unlikely(!next))
1280 		return false;
1281 
1282 	next_node = container_of(next, struct journal_node, node);
1283 
1284 	if (next_node->sector != node->sector)
1285 		return false;
1286 
1287 	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
1288 	if (next_section >= ic->committed_section &&
1289 	    next_section < ic->committed_section + ic->n_committed_sections)
1290 		return true;
1291 	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
1292 		return true;
1293 
1294 	return false;
1295 }
1296 
1297 #define TAG_READ	0
1298 #define TAG_WRITE	1
1299 #define TAG_CMP		2
1300 
1301 static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
1302 			       unsigned *metadata_offset, unsigned total_size, int op)
1303 {
1304 #define MAY_BE_FILLER		1
1305 #define MAY_BE_HASH		2
1306 	unsigned hash_offset = 0;
1307 	unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
1308 
1309 	do {
1310 		unsigned char *data, *dp;
1311 		struct dm_buffer *b;
1312 		unsigned to_copy;
1313 		int r;
1314 
1315 		r = dm_integrity_failed(ic);
1316 		if (unlikely(r))
1317 			return r;
1318 
1319 		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
1320 		if (IS_ERR(data))
1321 			return PTR_ERR(data);
1322 
1323 		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
1324 		dp = data + *metadata_offset;
1325 		if (op == TAG_READ) {
1326 			memcpy(tag, dp, to_copy);
1327 		} else if (op == TAG_WRITE) {
1328 			memcpy(dp, tag, to_copy);
1329 			dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
1330 		} else {
1331 			/* e.g.: op == TAG_CMP */
1332 
1333 			if (likely(is_power_of_2(ic->tag_size))) {
1334 				if (unlikely(memcmp(dp, tag, to_copy)))
1335 					if (unlikely(!ic->discard) ||
1336 					    unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
1337 						goto thorough_test;
1338 				}
1339 			} else {
1340 				unsigned i, ts;
1341 thorough_test:
1342 				ts = total_size;
1343 
1344 				for (i = 0; i < to_copy; i++, ts--) {
1345 					if (unlikely(dp[i] != tag[i]))
1346 						may_be &= ~MAY_BE_HASH;
1347 					if (likely(dp[i] != DISCARD_FILLER))
1348 						may_be &= ~MAY_BE_FILLER;
1349 					hash_offset++;
1350 					if (unlikely(hash_offset == ic->tag_size)) {
1351 						if (unlikely(!may_be)) {
1352 							dm_bufio_release(b);
1353 							return ts;
1354 						}
1355 						hash_offset = 0;
1356 						may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
1357 					}
1358 				}
1359 			}
1360 		}
1361 		dm_bufio_release(b);
1362 
1363 		tag += to_copy;
1364 		*metadata_offset += to_copy;
1365 		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
1366 			(*metadata_block)++;
1367 			*metadata_offset = 0;
1368 		}
1369 
1370 		if (unlikely(!is_power_of_2(ic->tag_size))) {
1371 			hash_offset = (hash_offset + to_copy) % ic->tag_size;
1372 		}
1373 
1374 		total_size -= to_copy;
1375 	} while (unlikely(total_size));
1376 
1377 	return 0;
1378 #undef MAY_BE_FILLER
1379 #undef MAY_BE_HASH
1380 }
1381 
1382 struct flush_request {
1383 	struct dm_io_request io_req;
1384 	struct dm_io_region io_reg;
1385 	struct dm_integrity_c *ic;
1386 	struct completion comp;
1387 };
1388 
1389 static void flush_notify(unsigned long error, void *fr_)
1390 {
1391 	struct flush_request *fr = fr_;
1392 	if (unlikely(error != 0))
1393 		dm_integrity_io_error(fr->ic, "flusing disk cache", -EIO);
1394 	complete(&fr->comp);
1395 }
1396 
1397 static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
1398 {
1399 	int r;
1400 
1401 	struct flush_request fr;
1402 
1403 	if (!ic->meta_dev)
1404 		flush_data = false;
1405 	if (flush_data) {
1406 		fr.io_req.bi_op = REQ_OP_WRITE,
1407 		fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
1408 		fr.io_req.mem.type = DM_IO_KMEM,
1409 		fr.io_req.mem.ptr.addr = NULL,
1410 		fr.io_req.notify.fn = flush_notify,
1411 		fr.io_req.notify.context = &fr;
1412 		fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
1413 		fr.io_reg.bdev = ic->dev->bdev,
1414 		fr.io_reg.sector = 0,
1415 		fr.io_reg.count = 0,
1416 		fr.ic = ic;
1417 		init_completion(&fr.comp);
1418 		r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
1419 		BUG_ON(r);
1420 	}
1421 
1422 	r = dm_bufio_write_dirty_buffers(ic->bufio);
1423 	if (unlikely(r))
1424 		dm_integrity_io_error(ic, "writing tags", r);
1425 
1426 	if (flush_data)
1427 		wait_for_completion(&fr.comp);
1428 }
1429 
1430 static void sleep_on_endio_wait(struct dm_integrity_c *ic)
1431 {
1432 	DECLARE_WAITQUEUE(wait, current);
1433 	__add_wait_queue(&ic->endio_wait, &wait);
1434 	__set_current_state(TASK_UNINTERRUPTIBLE);
1435 	spin_unlock_irq(&ic->endio_wait.lock);
1436 	io_schedule();
1437 	spin_lock_irq(&ic->endio_wait.lock);
1438 	__remove_wait_queue(&ic->endio_wait, &wait);
1439 }
1440 
1441 static void autocommit_fn(struct timer_list *t)
1442 {
1443 	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
1444 
1445 	if (likely(!dm_integrity_failed(ic)))
1446 		queue_work(ic->commit_wq, &ic->commit_work);
1447 }
1448 
1449 static void schedule_autocommit(struct dm_integrity_c *ic)
1450 {
1451 	if (!timer_pending(&ic->autocommit_timer))
1452 		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
1453 }
1454 
1455 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1456 {
1457 	struct bio *bio;
1458 	unsigned long flags;
1459 
1460 	spin_lock_irqsave(&ic->endio_wait.lock, flags);
1461 	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1462 	bio_list_add(&ic->flush_bio_list, bio);
1463 	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1464 
1465 	queue_work(ic->commit_wq, &ic->commit_work);
1466 }
1467 
1468 static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
1469 {
1470 	int r = dm_integrity_failed(ic);
1471 	if (unlikely(r) && !bio->bi_status)
1472 		bio->bi_status = errno_to_blk_status(r);
1473 	if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
1474 		unsigned long flags;
1475 		spin_lock_irqsave(&ic->endio_wait.lock, flags);
1476 		bio_list_add(&ic->synchronous_bios, bio);
1477 		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
1478 		spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1479 		return;
1480 	}
1481 	bio_endio(bio);
1482 }
1483 
1484 static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1485 {
1486 	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1487 
1488 	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
1489 		submit_flush_bio(ic, dio);
1490 	else
1491 		do_endio(ic, bio);
1492 }
1493 
1494 static void dec_in_flight(struct dm_integrity_io *dio)
1495 {
1496 	if (atomic_dec_and_test(&dio->in_flight)) {
1497 		struct dm_integrity_c *ic = dio->ic;
1498 		struct bio *bio;
1499 
1500 		remove_range(ic, &dio->range);
1501 
1502 		if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
1503 			schedule_autocommit(ic);
1504 
1505 		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1506 
1507 		if (unlikely(dio->bi_status) && !bio->bi_status)
1508 			bio->bi_status = dio->bi_status;
1509 		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
1510 			dio->range.logical_sector += dio->range.n_sectors;
1511 			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
1512 			INIT_WORK(&dio->work, integrity_bio_wait);
1513 			queue_work(ic->offload_wq, &dio->work);
1514 			return;
1515 		}
1516 		do_endio_flush(ic, dio);
1517 	}
1518 }
1519 
1520 static void integrity_end_io(struct bio *bio)
1521 {
1522 	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1523 
1524 	dm_bio_restore(&dio->bio_details, bio);
1525 	if (bio->bi_integrity)
1526 		bio->bi_opf |= REQ_INTEGRITY;
1527 
1528 	if (dio->completion)
1529 		complete(dio->completion);
1530 
1531 	dec_in_flight(dio);
1532 }
1533 
1534 static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
1535 				      const char *data, char *result)
1536 {
1537 	__u64 sector_le = cpu_to_le64(sector);
1538 	SHASH_DESC_ON_STACK(req, ic->internal_hash);
1539 	int r;
1540 	unsigned digest_size;
1541 
1542 	req->tfm = ic->internal_hash;
1543 
1544 	r = crypto_shash_init(req);
1545 	if (unlikely(r < 0)) {
1546 		dm_integrity_io_error(ic, "crypto_shash_init", r);
1547 		goto failed;
1548 	}
1549 
1550 	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
1551 	if (unlikely(r < 0)) {
1552 		dm_integrity_io_error(ic, "crypto_shash_update", r);
1553 		goto failed;
1554 	}
1555 
1556 	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
1557 	if (unlikely(r < 0)) {
1558 		dm_integrity_io_error(ic, "crypto_shash_update", r);
1559 		goto failed;
1560 	}
1561 
1562 	r = crypto_shash_final(req, result);
1563 	if (unlikely(r < 0)) {
1564 		dm_integrity_io_error(ic, "crypto_shash_final", r);
1565 		goto failed;
1566 	}
1567 
1568 	digest_size = crypto_shash_digestsize(ic->internal_hash);
1569 	if (unlikely(digest_size < ic->tag_size))
1570 		memset(result + digest_size, 0, ic->tag_size - digest_size);
1571 
1572 	return;
1573 
1574 failed:
1575 	/* this shouldn't happen anyway, the hash functions have no reason to fail */
1576 	get_random_bytes(result, ic->tag_size);
1577 }
1578 
1579 static void integrity_metadata(struct work_struct *w)
1580 {
1581 	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1582 	struct dm_integrity_c *ic = dio->ic;
1583 
1584 	int r;
1585 
1586 	if (ic->internal_hash) {
1587 		struct bvec_iter iter;
1588 		struct bio_vec bv;
1589 		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1590 		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1591 		char *checksums;
1592 		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
1593 		char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1594 		sector_t sector;
1595 		unsigned sectors_to_process;
1596 
1597 		if (unlikely(ic->mode == 'R'))
1598 			goto skip_io;
1599 
1600 		if (likely(dio->op != REQ_OP_DISCARD))
1601 			checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
1602 					    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1603 		else
1604 			checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1605 		if (!checksums) {
1606 			checksums = checksums_onstack;
1607 			if (WARN_ON(extra_space &&
1608 				    digest_size > sizeof(checksums_onstack))) {
1609 				r = -EINVAL;
1610 				goto error;
1611 			}
1612 		}
1613 
1614 		if (unlikely(dio->op == REQ_OP_DISCARD)) {
1615 			sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
1616 			unsigned bi_size = dio->bio_details.bi_iter.bi_size;
1617 			unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
1618 			unsigned max_blocks = max_size / ic->tag_size;
1619 			memset(checksums, DISCARD_FILLER, max_size);
1620 
1621 			while (bi_size) {
1622 				unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1623 				this_step_blocks = min(this_step_blocks, max_blocks);
1624 				r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1625 							this_step_blocks * ic->tag_size, TAG_WRITE);
1626 				if (unlikely(r)) {
1627 					if (likely(checksums != checksums_onstack))
1628 						kfree(checksums);
1629 					goto error;
1630 				}
1631 
1632 				/*if (bi_size < this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block)) {
1633 					printk("BUGG: bi_sector: %llx, bi_size: %u\n", bi_sector, bi_size);
1634 					printk("BUGG: this_step_blocks: %u\n", this_step_blocks);
1635 					BUG();
1636 				}*/
1637 				bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1638 				bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
1639 			}
1640 
1641 			if (likely(checksums != checksums_onstack))
1642 				kfree(checksums);
1643 			goto skip_io;
1644 		}
1645 
1646 		sector = dio->range.logical_sector;
1647 		sectors_to_process = dio->range.n_sectors;
1648 
1649 		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
1650 			unsigned pos;
1651 			char *mem, *checksums_ptr;
1652 
1653 again:
1654 			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
1655 			pos = 0;
1656 			checksums_ptr = checksums;
1657 			do {
1658 				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
1659 				checksums_ptr += ic->tag_size;
1660 				sectors_to_process -= ic->sectors_per_block;
1661 				pos += ic->sectors_per_block << SECTOR_SHIFT;
1662 				sector += ic->sectors_per_block;
1663 			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
1664 			kunmap_atomic(mem);
1665 
1666 			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1667 						checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
1668 			if (unlikely(r)) {
1669 				if (r > 0) {
1670 					char b[BDEVNAME_SIZE];
1671 					DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", bio_devname(bio, b),
1672 						    (sector - ((r + ic->tag_size - 1) / ic->tag_size)));
1673 					r = -EILSEQ;
1674 					atomic64_inc(&ic->number_of_mismatches);
1675 				}
1676 				if (likely(checksums != checksums_onstack))
1677 					kfree(checksums);
1678 				goto error;
1679 			}
1680 
1681 			if (!sectors_to_process)
1682 				break;
1683 
1684 			if (unlikely(pos < bv.bv_len)) {
1685 				bv.bv_offset += pos;
1686 				bv.bv_len -= pos;
1687 				goto again;
1688 			}
1689 		}
1690 
1691 		if (likely(checksums != checksums_onstack))
1692 			kfree(checksums);
1693 	} else {
1694 		struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
1695 
1696 		if (bip) {
1697 			struct bio_vec biv;
1698 			struct bvec_iter iter;
1699 			unsigned data_to_process = dio->range.n_sectors;
1700 			sector_to_block(ic, data_to_process);
1701 			data_to_process *= ic->tag_size;
1702 
1703 			bip_for_each_vec(biv, bip, iter) {
1704 				unsigned char *tag;
1705 				unsigned this_len;
1706 
1707 				BUG_ON(PageHighMem(biv.bv_page));
1708 				tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1709 				this_len = min(biv.bv_len, data_to_process);
1710 				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1711 							this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
1712 				if (unlikely(r))
1713 					goto error;
1714 				data_to_process -= this_len;
1715 				if (!data_to_process)
1716 					break;
1717 			}
1718 		}
1719 	}
1720 skip_io:
1721 	dec_in_flight(dio);
1722 	return;
1723 error:
1724 	dio->bi_status = errno_to_blk_status(r);
1725 	dec_in_flight(dio);
1726 }
1727 
1728 static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
1729 {
1730 	struct dm_integrity_c *ic = ti->private;
1731 	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1732 	struct bio_integrity_payload *bip;
1733 
1734 	sector_t area, offset;
1735 
1736 	dio->ic = ic;
1737 	dio->bi_status = 0;
1738 	dio->op = bio_op(bio);
1739 
1740 	if (unlikely(dio->op == REQ_OP_DISCARD)) {
1741 		if (ti->max_io_len) {
1742 			sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
1743 			unsigned log2_max_io_len = __fls(ti->max_io_len);
1744 			sector_t start_boundary = sec >> log2_max_io_len;
1745 			sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
1746 			if (start_boundary < end_boundary) {
1747 				sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
1748 				dm_accept_partial_bio(bio, len);
1749 			}
1750 		}
1751 	}
1752 
1753 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1754 		submit_flush_bio(ic, dio);
1755 		return DM_MAPIO_SUBMITTED;
1756 	}
1757 
1758 	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1759 	dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
1760 	if (unlikely(dio->fua)) {
1761 		/*
1762 		 * Don't pass down the FUA flag because we have to flush
1763 		 * disk cache anyway.
1764 		 */
1765 		bio->bi_opf &= ~REQ_FUA;
1766 	}
1767 	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1768 		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
1769 		      dio->range.logical_sector, bio_sectors(bio),
1770 		      ic->provided_data_sectors);
1771 		return DM_MAPIO_KILL;
1772 	}
1773 	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
1774 		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
1775 		      ic->sectors_per_block,
1776 		      dio->range.logical_sector, bio_sectors(bio));
1777 		return DM_MAPIO_KILL;
1778 	}
1779 
1780 	if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
1781 		struct bvec_iter iter;
1782 		struct bio_vec bv;
1783 		bio_for_each_segment(bv, bio, iter) {
1784 			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1785 				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
1786 					bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1787 				return DM_MAPIO_KILL;
1788 			}
1789 		}
1790 	}
1791 
1792 	bip = bio_integrity(bio);
1793 	if (!ic->internal_hash) {
1794 		if (bip) {
1795 			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1796 			if (ic->log2_tag_size >= 0)
1797 				wanted_tag_size <<= ic->log2_tag_size;
1798 			else
1799 				wanted_tag_size *= ic->tag_size;
1800 			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
1801 				DMERR("Invalid integrity data size %u, expected %u",
1802 				      bip->bip_iter.bi_size, wanted_tag_size);
1803 				return DM_MAPIO_KILL;
1804 			}
1805 		}
1806 	} else {
1807 		if (unlikely(bip != NULL)) {
1808 			DMERR("Unexpected integrity data when using internal hash");
1809 			return DM_MAPIO_KILL;
1810 		}
1811 	}
1812 
1813 	if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
1814 		return DM_MAPIO_KILL;
1815 
1816 	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1817 	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1818 	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
1819 
1820 	dm_integrity_map_continue(dio, true);
1821 	return DM_MAPIO_SUBMITTED;
1822 }
1823 
1824 static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
1825 				 unsigned journal_section, unsigned journal_entry)
1826 {
1827 	struct dm_integrity_c *ic = dio->ic;
1828 	sector_t logical_sector;
1829 	unsigned n_sectors;
1830 
1831 	logical_sector = dio->range.logical_sector;
1832 	n_sectors = dio->range.n_sectors;
1833 	do {
1834 		struct bio_vec bv = bio_iovec(bio);
1835 		char *mem;
1836 
1837 		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
1838 			bv.bv_len = n_sectors << SECTOR_SHIFT;
1839 		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
1840 		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
1841 retry_kmap:
1842 		mem = kmap_atomic(bv.bv_page);
1843 		if (likely(dio->op == REQ_OP_WRITE))
1844 			flush_dcache_page(bv.bv_page);
1845 
1846 		do {
1847 			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
1848 
1849 			if (unlikely(dio->op == REQ_OP_READ)) {
1850 				struct journal_sector *js;
1851 				char *mem_ptr;
1852 				unsigned s;
1853 
1854 				if (unlikely(journal_entry_is_inprogress(je))) {
1855 					flush_dcache_page(bv.bv_page);
1856 					kunmap_atomic(mem);
1857 
1858 					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1859 					goto retry_kmap;
1860 				}
1861 				smp_rmb();
1862 				BUG_ON(journal_entry_get_sector(je) != logical_sector);
1863 				js = access_journal_data(ic, journal_section, journal_entry);
1864 				mem_ptr = mem + bv.bv_offset;
1865 				s = 0;
1866 				do {
1867 					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
1868 					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
1869 					js++;
1870 					mem_ptr += 1 << SECTOR_SHIFT;
1871 				} while (++s < ic->sectors_per_block);
1872 #ifdef INTERNAL_VERIFY
1873 				if (ic->internal_hash) {
1874 					char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1875 
1876 					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
1877 					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
1878 						DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
1879 							    logical_sector);
1880 					}
1881 				}
1882 #endif
1883 			}
1884 
1885 			if (!ic->internal_hash) {
1886 				struct bio_integrity_payload *bip = bio_integrity(bio);
1887 				unsigned tag_todo = ic->tag_size;
1888 				char *tag_ptr = journal_entry_tag(ic, je);
1889 
1890 				if (bip) do {
1891 					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
1892 					unsigned tag_now = min(biv.bv_len, tag_todo);
1893 					char *tag_addr;
1894 					BUG_ON(PageHighMem(biv.bv_page));
1895 					tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1896 					if (likely(dio->op == REQ_OP_WRITE))
1897 						memcpy(tag_ptr, tag_addr, tag_now);
1898 					else
1899 						memcpy(tag_addr, tag_ptr, tag_now);
1900 					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
1901 					tag_ptr += tag_now;
1902 					tag_todo -= tag_now;
1903 				} while (unlikely(tag_todo)); else {
1904 					if (likely(dio->op == REQ_OP_WRITE))
1905 						memset(tag_ptr, 0, tag_todo);
1906 				}
1907 			}
1908 
1909 			if (likely(dio->op == REQ_OP_WRITE)) {
1910 				struct journal_sector *js;
1911 				unsigned s;
1912 
1913 				js = access_journal_data(ic, journal_section, journal_entry);
1914 				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
1915 
1916 				s = 0;
1917 				do {
1918 					je->last_bytes[s] = js[s].commit_id;
1919 				} while (++s < ic->sectors_per_block);
1920 
1921 				if (ic->internal_hash) {
1922 					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1923 					if (unlikely(digest_size > ic->tag_size)) {
1924 						char checksums_onstack[HASH_MAX_DIGESTSIZE];
1925 						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
1926 						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
1927 					} else
1928 						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
1929 				}
1930 
1931 				journal_entry_set_sector(je, logical_sector);
1932 			}
1933 			logical_sector += ic->sectors_per_block;
1934 
1935 			journal_entry++;
1936 			if (unlikely(journal_entry == ic->journal_section_entries)) {
1937 				journal_entry = 0;
1938 				journal_section++;
1939 				wraparound_section(ic, &journal_section);
1940 			}
1941 
1942 			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
1943 		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
1944 
1945 		if (unlikely(dio->op == REQ_OP_READ))
1946 			flush_dcache_page(bv.bv_page);
1947 		kunmap_atomic(mem);
1948 	} while (n_sectors);
1949 
1950 	if (likely(dio->op == REQ_OP_WRITE)) {
1951 		smp_mb();
1952 		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
1953 			wake_up(&ic->copy_to_journal_wait);
1954 		if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
1955 			queue_work(ic->commit_wq, &ic->commit_work);
1956 		} else {
1957 			schedule_autocommit(ic);
1958 		}
1959 	} else {
1960 		remove_range(ic, &dio->range);
1961 	}
1962 
1963 	if (unlikely(bio->bi_iter.bi_size)) {
1964 		sector_t area, offset;
1965 
1966 		dio->range.logical_sector = logical_sector;
1967 		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1968 		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1969 		return true;
1970 	}
1971 
1972 	return false;
1973 }
1974 
1975 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
1976 {
1977 	struct dm_integrity_c *ic = dio->ic;
1978 	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1979 	unsigned journal_section, journal_entry;
1980 	unsigned journal_read_pos;
1981 	struct completion read_comp;
1982 	bool discard_retried = false;
1983 	bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
1984 	if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
1985 		need_sync_io = true;
1986 
1987 	if (need_sync_io && from_map) {
1988 		INIT_WORK(&dio->work, integrity_bio_wait);
1989 		queue_work(ic->offload_wq, &dio->work);
1990 		return;
1991 	}
1992 
1993 lock_retry:
1994 	spin_lock_irq(&ic->endio_wait.lock);
1995 retry:
1996 	if (unlikely(dm_integrity_failed(ic))) {
1997 		spin_unlock_irq(&ic->endio_wait.lock);
1998 		do_endio(ic, bio);
1999 		return;
2000 	}
2001 	dio->range.n_sectors = bio_sectors(bio);
2002 	journal_read_pos = NOT_FOUND;
2003 	if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
2004 		if (dio->op == REQ_OP_WRITE) {
2005 			unsigned next_entry, i, pos;
2006 			unsigned ws, we, range_sectors;
2007 
2008 			dio->range.n_sectors = min(dio->range.n_sectors,
2009 						   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
2010 			if (unlikely(!dio->range.n_sectors)) {
2011 				if (from_map)
2012 					goto offload_to_thread;
2013 				sleep_on_endio_wait(ic);
2014 				goto retry;
2015 			}
2016 			range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
2017 			ic->free_sectors -= range_sectors;
2018 			journal_section = ic->free_section;
2019 			journal_entry = ic->free_section_entry;
2020 
2021 			next_entry = ic->free_section_entry + range_sectors;
2022 			ic->free_section_entry = next_entry % ic->journal_section_entries;
2023 			ic->free_section += next_entry / ic->journal_section_entries;
2024 			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
2025 			wraparound_section(ic, &ic->free_section);
2026 
2027 			pos = journal_section * ic->journal_section_entries + journal_entry;
2028 			ws = journal_section;
2029 			we = journal_entry;
2030 			i = 0;
2031 			do {
2032 				struct journal_entry *je;
2033 
2034 				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
2035 				pos++;
2036 				if (unlikely(pos >= ic->journal_entries))
2037 					pos = 0;
2038 
2039 				je = access_journal_entry(ic, ws, we);
2040 				BUG_ON(!journal_entry_is_unused(je));
2041 				journal_entry_set_inprogress(je);
2042 				we++;
2043 				if (unlikely(we == ic->journal_section_entries)) {
2044 					we = 0;
2045 					ws++;
2046 					wraparound_section(ic, &ws);
2047 				}
2048 			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);
2049 
2050 			spin_unlock_irq(&ic->endio_wait.lock);
2051 			goto journal_read_write;
2052 		} else {
2053 			sector_t next_sector;
2054 			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2055 			if (likely(journal_read_pos == NOT_FOUND)) {
2056 				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
2057 					dio->range.n_sectors = next_sector - dio->range.logical_sector;
2058 			} else {
2059 				unsigned i;
2060 				unsigned jp = journal_read_pos + 1;
2061 				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
2062 					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
2063 						break;
2064 				}
2065 				dio->range.n_sectors = i;
2066 			}
2067 		}
2068 	}
2069 	if (unlikely(!add_new_range(ic, &dio->range, true))) {
2070 		/*
2071 		 * We must not sleep in the request routine because it could
2072 		 * stall bios on current->bio_list.
2073 		 * So, we offload the bio to a workqueue if we have to sleep.
2074 		 */
2075 		if (from_map) {
2076 offload_to_thread:
2077 			spin_unlock_irq(&ic->endio_wait.lock);
2078 			INIT_WORK(&dio->work, integrity_bio_wait);
2079 			queue_work(ic->wait_wq, &dio->work);
2080 			return;
2081 		}
2082 		if (journal_read_pos != NOT_FOUND)
2083 			dio->range.n_sectors = ic->sectors_per_block;
2084 		wait_and_add_new_range(ic, &dio->range);
2085 		/*
2086 		 * wait_and_add_new_range drops the spinlock, so the journal
2087 		 * may have been changed arbitrarily. We need to recheck.
2088 		 * To simplify the code, we restrict I/O size to just one block.
2089 		 */
2090 		if (journal_read_pos != NOT_FOUND) {
2091 			sector_t next_sector;
2092 			unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2093 			if (unlikely(new_pos != journal_read_pos)) {
2094 				remove_range_unlocked(ic, &dio->range);
2095 				goto retry;
2096 			}
2097 		}
2098 	}
2099 	if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
2100 		sector_t next_sector;
2101 		unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2102 		if (unlikely(new_pos != NOT_FOUND) ||
2103 		    unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
2104 			remove_range_unlocked(ic, &dio->range);
2105 			spin_unlock_irq(&ic->endio_wait.lock);
2106 			queue_work(ic->commit_wq, &ic->commit_work);
2107 			flush_workqueue(ic->commit_wq);
2108 			queue_work(ic->writer_wq, &ic->writer_work);
2109 			flush_workqueue(ic->writer_wq);
2110 			discard_retried = true;
2111 			goto lock_retry;
2112 		}
2113 	}
2114 	spin_unlock_irq(&ic->endio_wait.lock);
2115 
2116 	if (unlikely(journal_read_pos != NOT_FOUND)) {
2117 		journal_section = journal_read_pos / ic->journal_section_entries;
2118 		journal_entry = journal_read_pos % ic->journal_section_entries;
2119 		goto journal_read_write;
2120 	}
2121 
2122 	if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
2123 		if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2124 				     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2125 			struct bitmap_block_status *bbs;
2126 
2127 			bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
2128 			spin_lock(&bbs->bio_queue_lock);
2129 			bio_list_add(&bbs->bio_queue, bio);
2130 			spin_unlock(&bbs->bio_queue_lock);
2131 			queue_work(ic->writer_wq, &bbs->work);
2132 			return;
2133 		}
2134 	}
2135 
2136 	dio->in_flight = (atomic_t)ATOMIC_INIT(2);
2137 
2138 	if (need_sync_io) {
2139 		init_completion(&read_comp);
2140 		dio->completion = &read_comp;
2141 	} else
2142 		dio->completion = NULL;
2143 
2144 	dm_bio_record(&dio->bio_details, bio);
2145 	bio_set_dev(bio, ic->dev->bdev);
2146 	bio->bi_integrity = NULL;
2147 	bio->bi_opf &= ~REQ_INTEGRITY;
2148 	bio->bi_end_io = integrity_end_io;
2149 	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
2150 
2151 	if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
2152 		integrity_metadata(&dio->work);
2153 		dm_integrity_flush_buffers(ic, false);
2154 
2155 		dio->in_flight = (atomic_t)ATOMIC_INIT(1);
2156 		dio->completion = NULL;
2157 
2158 		submit_bio_noacct(bio);
2159 
2160 		return;
2161 	}
2162 
2163 	submit_bio_noacct(bio);
2164 
2165 	if (need_sync_io) {
2166 		wait_for_completion_io(&read_comp);
2167 		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
2168 		    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2169 			goto skip_check;
2170 		if (ic->mode == 'B') {
2171 			if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2172 					     dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2173 				goto skip_check;
2174 		}
2175 
2176 		if (likely(!bio->bi_status))
2177 			integrity_metadata(&dio->work);
2178 		else
2179 skip_check:
2180 			dec_in_flight(dio);
2181 
2182 	} else {
2183 		INIT_WORK(&dio->work, integrity_metadata);
2184 		queue_work(ic->metadata_wq, &dio->work);
2185 	}
2186 
2187 	return;
2188 
2189 journal_read_write:
2190 	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2191 		goto lock_retry;
2192 
2193 	do_endio_flush(ic, dio);
2194 }
2195 
2196 
2197 static void integrity_bio_wait(struct work_struct *w)
2198 {
2199 	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2200 
2201 	dm_integrity_map_continue(dio, false);
2202 }
2203 
2204 static void pad_uncommitted(struct dm_integrity_c *ic)
2205 {
2206 	if (ic->free_section_entry) {
2207 		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2208 		ic->free_section_entry = 0;
2209 		ic->free_section++;
2210 		wraparound_section(ic, &ic->free_section);
2211 		ic->n_uncommitted_sections++;
2212 	}
2213 	if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
2214 		    (ic->n_uncommitted_sections + ic->n_committed_sections) *
2215 		    ic->journal_section_entries + ic->free_sectors)) {
2216 		DMCRIT("journal_sections %u, journal_section_entries %u, "
2217 		       "n_uncommitted_sections %u, n_committed_sections %u, "
2218 		       "journal_section_entries %u, free_sectors %u",
2219 		       ic->journal_sections, ic->journal_section_entries,
2220 		       ic->n_uncommitted_sections, ic->n_committed_sections,
2221 		       ic->journal_section_entries, ic->free_sectors);
2222 	}
2223 }
2224 
2225 static void integrity_commit(struct work_struct *w)
2226 {
2227 	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2228 	unsigned commit_start, commit_sections;
2229 	unsigned i, j, n;
2230 	struct bio *flushes;
2231 
2232 	del_timer(&ic->autocommit_timer);
2233 
2234 	spin_lock_irq(&ic->endio_wait.lock);
2235 	flushes = bio_list_get(&ic->flush_bio_list);
2236 	if (unlikely(ic->mode != 'J')) {
2237 		spin_unlock_irq(&ic->endio_wait.lock);
2238 		dm_integrity_flush_buffers(ic, true);
2239 		goto release_flush_bios;
2240 	}
2241 
2242 	pad_uncommitted(ic);
2243 	commit_start = ic->uncommitted_section;
2244 	commit_sections = ic->n_uncommitted_sections;
2245 	spin_unlock_irq(&ic->endio_wait.lock);
2246 
2247 	if (!commit_sections)
2248 		goto release_flush_bios;
2249 
2250 	i = commit_start;
2251 	for (n = 0; n < commit_sections; n++) {
2252 		for (j = 0; j < ic->journal_section_entries; j++) {
2253 			struct journal_entry *je;
2254 			je = access_journal_entry(ic, i, j);
2255 			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2256 		}
2257 		for (j = 0; j < ic->journal_section_sectors; j++) {
2258 			struct journal_sector *js;
2259 			js = access_journal(ic, i, j);
2260 			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2261 		}
2262 		i++;
2263 		if (unlikely(i >= ic->journal_sections))
2264 			ic->commit_seq = next_commit_seq(ic->commit_seq);
2265 		wraparound_section(ic, &i);
2266 	}
2267 	smp_rmb();
2268 
2269 	write_journal(ic, commit_start, commit_sections);
2270 
2271 	spin_lock_irq(&ic->endio_wait.lock);
2272 	ic->uncommitted_section += commit_sections;
2273 	wraparound_section(ic, &ic->uncommitted_section);
2274 	ic->n_uncommitted_sections -= commit_sections;
2275 	ic->n_committed_sections += commit_sections;
2276 	spin_unlock_irq(&ic->endio_wait.lock);
2277 
2278 	if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2279 		queue_work(ic->writer_wq, &ic->writer_work);
2280 
2281 release_flush_bios:
2282 	while (flushes) {
2283 		struct bio *next = flushes->bi_next;
2284 		flushes->bi_next = NULL;
2285 		do_endio(ic, flushes);
2286 		flushes = next;
2287 	}
2288 }
2289 
2290 static void complete_copy_from_journal(unsigned long error, void *context)
2291 {
2292 	struct journal_io *io = context;
2293 	struct journal_completion *comp = io->comp;
2294 	struct dm_integrity_c *ic = comp->ic;
2295 	remove_range(ic, &io->range);
2296 	mempool_free(io, &ic->journal_io_mempool);
2297 	if (unlikely(error != 0))
2298 		dm_integrity_io_error(ic, "copying from journal", -EIO);
2299 	complete_journal_op(comp);
2300 }
2301 
2302 static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2303 			       struct journal_entry *je)
2304 {
2305 	unsigned s = 0;
2306 	do {
2307 		js->commit_id = je->last_bytes[s];
2308 		js++;
2309 	} while (++s < ic->sectors_per_block);
2310 }
2311 
2312 static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
2313 			     unsigned write_sections, bool from_replay)
2314 {
2315 	unsigned i, j, n;
2316 	struct journal_completion comp;
2317 	struct blk_plug plug;
2318 
2319 	blk_start_plug(&plug);
2320 
2321 	comp.ic = ic;
2322 	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2323 	init_completion(&comp.comp);
2324 
2325 	i = write_start;
2326 	for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2327 #ifndef INTERNAL_VERIFY
2328 		if (unlikely(from_replay))
2329 #endif
2330 			rw_section_mac(ic, i, false);
2331 		for (j = 0; j < ic->journal_section_entries; j++) {
2332 			struct journal_entry *je = access_journal_entry(ic, i, j);
2333 			sector_t sec, area, offset;
2334 			unsigned k, l, next_loop;
2335 			sector_t metadata_block;
2336 			unsigned metadata_offset;
2337 			struct journal_io *io;
2338 
2339 			if (journal_entry_is_unused(je))
2340 				continue;
2341 			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
2342 			sec = journal_entry_get_sector(je);
2343 			if (unlikely(from_replay)) {
2344 				if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
2345 					dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2346 					sec &= ~(sector_t)(ic->sectors_per_block - 1);
2347 				}
2348 			}
2349 			if (unlikely(sec >= ic->provided_data_sectors))
2350 				continue;
2351 			get_area_and_offset(ic, sec, &area, &offset);
2352 			restore_last_bytes(ic, access_journal_data(ic, i, j), je);
2353 			for (k = j + 1; k < ic->journal_section_entries; k++) {
2354 				struct journal_entry *je2 = access_journal_entry(ic, i, k);
2355 				sector_t sec2, area2, offset2;
2356 				if (journal_entry_is_unused(je2))
2357 					break;
2358 				BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
2359 				sec2 = journal_entry_get_sector(je2);
2360 				if (unlikely(sec2 >= ic->provided_data_sectors))
2361 					break;
2362 				get_area_and_offset(ic, sec2, &area2, &offset2);
2363 				if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2364 					break;
2365 				restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2366 			}
2367 			next_loop = k - 1;
2368 
2369 			io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2370 			io->comp = &comp;
2371 			io->range.logical_sector = sec;
2372 			io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2373 
2374 			spin_lock_irq(&ic->endio_wait.lock);
2375 			add_new_range_and_wait(ic, &io->range);
2376 
2377 			if (likely(!from_replay)) {
2378 				struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2379 
2380 				/* don't write if there is newer committed sector */
2381 				while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2382 					struct journal_entry *je2 = access_journal_entry(ic, i, j);
2383 
2384 					journal_entry_set_unused(je2);
2385 					remove_journal_node(ic, &section_node[j]);
2386 					j++;
2387 					sec += ic->sectors_per_block;
2388 					offset += ic->sectors_per_block;
2389 				}
2390 				while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2391 					struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2392 
2393 					journal_entry_set_unused(je2);
2394 					remove_journal_node(ic, &section_node[k - 1]);
2395 					k--;
2396 				}
2397 				if (j == k) {
2398 					remove_range_unlocked(ic, &io->range);
2399 					spin_unlock_irq(&ic->endio_wait.lock);
2400 					mempool_free(io, &ic->journal_io_mempool);
2401 					goto skip_io;
2402 				}
2403 				for (l = j; l < k; l++) {
2404 					remove_journal_node(ic, &section_node[l]);
2405 				}
2406 			}
2407 			spin_unlock_irq(&ic->endio_wait.lock);
2408 
2409 			metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2410 			for (l = j; l < k; l++) {
2411 				int r;
2412 				struct journal_entry *je2 = access_journal_entry(ic, i, l);
2413 
2414 				if (
2415 #ifndef INTERNAL_VERIFY
2416 				    unlikely(from_replay) &&
2417 #endif
2418 				    ic->internal_hash) {
2419 					char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2420 
2421 					integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2422 								  (char *)access_journal_data(ic, i, l), test_tag);
2423 					if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
2424 						dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2425 				}
2426 
2427 				journal_entry_set_unused(je2);
2428 				r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2429 							ic->tag_size, TAG_WRITE);
2430 				if (unlikely(r)) {
2431 					dm_integrity_io_error(ic, "reading tags", r);
2432 				}
2433 			}
2434 
2435 			atomic_inc(&comp.in_flight);
2436 			copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2437 					  (k - j) << ic->sb->log2_sectors_per_block,
2438 					  get_data_sector(ic, area, offset),
2439 					  complete_copy_from_journal, io);
2440 skip_io:
2441 			j = next_loop;
2442 		}
2443 	}
2444 
2445 	dm_bufio_write_dirty_buffers_async(ic->bufio);
2446 
2447 	blk_finish_plug(&plug);
2448 
2449 	complete_journal_op(&comp);
2450 	wait_for_completion_io(&comp.comp);
2451 
2452 	dm_integrity_flush_buffers(ic, true);
2453 }
2454 
2455 static void integrity_writer(struct work_struct *w)
2456 {
2457 	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2458 	unsigned write_start, write_sections;
2459 
2460 	unsigned prev_free_sectors;
2461 
2462 	/* the following test is not needed, but it tests the replay code */
2463 	if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
2464 		return;
2465 
2466 	spin_lock_irq(&ic->endio_wait.lock);
2467 	write_start = ic->committed_section;
2468 	write_sections = ic->n_committed_sections;
2469 	spin_unlock_irq(&ic->endio_wait.lock);
2470 
2471 	if (!write_sections)
2472 		return;
2473 
2474 	do_journal_write(ic, write_start, write_sections, false);
2475 
2476 	spin_lock_irq(&ic->endio_wait.lock);
2477 
2478 	ic->committed_section += write_sections;
2479 	wraparound_section(ic, &ic->committed_section);
2480 	ic->n_committed_sections -= write_sections;
2481 
2482 	prev_free_sectors = ic->free_sectors;
2483 	ic->free_sectors += write_sections * ic->journal_section_entries;
2484 	if (unlikely(!prev_free_sectors))
2485 		wake_up_locked(&ic->endio_wait);
2486 
2487 	spin_unlock_irq(&ic->endio_wait.lock);
2488 }
2489 
2490 static void recalc_write_super(struct dm_integrity_c *ic)
2491 {
2492 	int r;
2493 
2494 	dm_integrity_flush_buffers(ic, false);
2495 	if (dm_integrity_failed(ic))
2496 		return;
2497 
2498 	r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
2499 	if (unlikely(r))
2500 		dm_integrity_io_error(ic, "writing superblock", r);
2501 }
2502 
2503 static void integrity_recalc(struct work_struct *w)
2504 {
2505 	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2506 	struct dm_integrity_range range;
2507 	struct dm_io_request io_req;
2508 	struct dm_io_region io_loc;
2509 	sector_t area, offset;
2510 	sector_t metadata_block;
2511 	unsigned metadata_offset;
2512 	sector_t logical_sector, n_sectors;
2513 	__u8 *t;
2514 	unsigned i;
2515 	int r;
2516 	unsigned super_counter = 0;
2517 
2518 	DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2519 
2520 	spin_lock_irq(&ic->endio_wait.lock);
2521 
2522 next_chunk:
2523 
2524 	if (unlikely(dm_post_suspending(ic->ti)))
2525 		goto unlock_ret;
2526 
2527 	range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2528 	if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2529 		if (ic->mode == 'B') {
2530 			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2531 			DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2532 			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2533 		}
2534 		goto unlock_ret;
2535 	}
2536 
2537 	get_area_and_offset(ic, range.logical_sector, &area, &offset);
2538 	range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
2539 	if (!ic->meta_dev)
2540 		range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
2541 
2542 	add_new_range_and_wait(ic, &range);
2543 	spin_unlock_irq(&ic->endio_wait.lock);
2544 	logical_sector = range.logical_sector;
2545 	n_sectors = range.n_sectors;
2546 
2547 	if (ic->mode == 'B') {
2548 		if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
2549 			goto advance_and_next;
2550 		}
2551 		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2552 				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2553 			logical_sector += ic->sectors_per_block;
2554 			n_sectors -= ic->sectors_per_block;
2555 			cond_resched();
2556 		}
2557 		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2558 				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2559 			n_sectors -= ic->sectors_per_block;
2560 			cond_resched();
2561 		}
2562 		get_area_and_offset(ic, logical_sector, &area, &offset);
2563 	}
2564 
2565 	DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
2566 
2567 	if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2568 		recalc_write_super(ic);
2569 		if (ic->mode == 'B') {
2570 			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2571 		}
2572 		super_counter = 0;
2573 	}
2574 
2575 	if (unlikely(dm_integrity_failed(ic)))
2576 		goto err;
2577 
2578 	io_req.bi_op = REQ_OP_READ;
2579 	io_req.bi_op_flags = 0;
2580 	io_req.mem.type = DM_IO_VMA;
2581 	io_req.mem.ptr.addr = ic->recalc_buffer;
2582 	io_req.notify.fn = NULL;
2583 	io_req.client = ic->io;
2584 	io_loc.bdev = ic->dev->bdev;
2585 	io_loc.sector = get_data_sector(ic, area, offset);
2586 	io_loc.count = n_sectors;
2587 
2588 	r = dm_io(&io_req, 1, &io_loc, NULL);
2589 	if (unlikely(r)) {
2590 		dm_integrity_io_error(ic, "reading data", r);
2591 		goto err;
2592 	}
2593 
2594 	t = ic->recalc_tags;
2595 	for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2596 		integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
2597 		t += ic->tag_size;
2598 	}
2599 
2600 	metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2601 
2602 	r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
2603 	if (unlikely(r)) {
2604 		dm_integrity_io_error(ic, "writing tags", r);
2605 		goto err;
2606 	}
2607 
2608 	if (ic->mode == 'B') {
2609 		sector_t start, end;
2610 		start = (range.logical_sector >>
2611 			 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2612 			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2613 		end = ((range.logical_sector + range.n_sectors) >>
2614 		       (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2615 			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2616 		block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
2617 	}
2618 
2619 advance_and_next:
2620 	cond_resched();
2621 
2622 	spin_lock_irq(&ic->endio_wait.lock);
2623 	remove_range_unlocked(ic, &range);
2624 	ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2625 	goto next_chunk;
2626 
2627 err:
2628 	remove_range(ic, &range);
2629 	return;
2630 
2631 unlock_ret:
2632 	spin_unlock_irq(&ic->endio_wait.lock);
2633 
2634 	recalc_write_super(ic);
2635 }
2636 
2637 static void bitmap_block_work(struct work_struct *w)
2638 {
2639 	struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
2640 	struct dm_integrity_c *ic = bbs->ic;
2641 	struct bio *bio;
2642 	struct bio_list bio_queue;
2643 	struct bio_list waiting;
2644 
2645 	bio_list_init(&waiting);
2646 
2647 	spin_lock(&bbs->bio_queue_lock);
2648 	bio_queue = bbs->bio_queue;
2649 	bio_list_init(&bbs->bio_queue);
2650 	spin_unlock(&bbs->bio_queue_lock);
2651 
2652 	while ((bio = bio_list_pop(&bio_queue))) {
2653 		struct dm_integrity_io *dio;
2654 
2655 		dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2656 
2657 		if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2658 				    dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2659 			remove_range(ic, &dio->range);
2660 			INIT_WORK(&dio->work, integrity_bio_wait);
2661 			queue_work(ic->offload_wq, &dio->work);
2662 		} else {
2663 			block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2664 					dio->range.n_sectors, BITMAP_OP_SET);
2665 			bio_list_add(&waiting, bio);
2666 		}
2667 	}
2668 
2669 	if (bio_list_empty(&waiting))
2670 		return;
2671 
2672 	rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
2673 			   bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
2674 			   BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
2675 
2676 	while ((bio = bio_list_pop(&waiting))) {
2677 		struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2678 
2679 		block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2680 				dio->range.n_sectors, BITMAP_OP_SET);
2681 
2682 		remove_range(ic, &dio->range);
2683 		INIT_WORK(&dio->work, integrity_bio_wait);
2684 		queue_work(ic->offload_wq, &dio->work);
2685 	}
2686 
2687 	queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2688 }
2689 
2690 static void bitmap_flush_work(struct work_struct *work)
2691 {
2692 	struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2693 	struct dm_integrity_range range;
2694 	unsigned long limit;
2695 	struct bio *bio;
2696 
2697 	dm_integrity_flush_buffers(ic, false);
2698 
2699 	range.logical_sector = 0;
2700 	range.n_sectors = ic->provided_data_sectors;
2701 
2702 	spin_lock_irq(&ic->endio_wait.lock);
2703 	add_new_range_and_wait(ic, &range);
2704 	spin_unlock_irq(&ic->endio_wait.lock);
2705 
2706 	dm_integrity_flush_buffers(ic, true);
2707 
2708 	limit = ic->provided_data_sectors;
2709 	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2710 		limit = le64_to_cpu(ic->sb->recalc_sector)
2711 			>> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2712 			<< (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2713 	}
2714 	/*DEBUG_print("zeroing journal\n");*/
2715 	block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2716 	block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2717 
2718 	rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2719 			   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2720 
2721 	spin_lock_irq(&ic->endio_wait.lock);
2722 	remove_range_unlocked(ic, &range);
2723 	while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2724 		bio_endio(bio);
2725 		spin_unlock_irq(&ic->endio_wait.lock);
2726 		spin_lock_irq(&ic->endio_wait.lock);
2727 	}
2728 	spin_unlock_irq(&ic->endio_wait.lock);
2729 }
2730 
2731 
2732 static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2733 			 unsigned n_sections, unsigned char commit_seq)
2734 {
2735 	unsigned i, j, n;
2736 
2737 	if (!n_sections)
2738 		return;
2739 
2740 	for (n = 0; n < n_sections; n++) {
2741 		i = start_section + n;
2742 		wraparound_section(ic, &i);
2743 		for (j = 0; j < ic->journal_section_sectors; j++) {
2744 			struct journal_sector *js = access_journal(ic, i, j);
2745 			memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
2746 			js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2747 		}
2748 		for (j = 0; j < ic->journal_section_entries; j++) {
2749 			struct journal_entry *je = access_journal_entry(ic, i, j);
2750 			journal_entry_set_unused(je);
2751 		}
2752 	}
2753 
2754 	write_journal(ic, start_section, n_sections);
2755 }
2756 
2757 static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2758 {
2759 	unsigned char k;
2760 	for (k = 0; k < N_COMMIT_IDS; k++) {
2761 		if (dm_integrity_commit_id(ic, i, j, k) == id)
2762 			return k;
2763 	}
2764 	dm_integrity_io_error(ic, "journal commit id", -EIO);
2765 	return -EIO;
2766 }
2767 
2768 static void replay_journal(struct dm_integrity_c *ic)
2769 {
2770 	unsigned i, j;
2771 	bool used_commit_ids[N_COMMIT_IDS];
2772 	unsigned max_commit_id_sections[N_COMMIT_IDS];
2773 	unsigned write_start, write_sections;
2774 	unsigned continue_section;
2775 	bool journal_empty;
2776 	unsigned char unused, last_used, want_commit_seq;
2777 
2778 	if (ic->mode == 'R')
2779 		return;
2780 
2781 	if (ic->journal_uptodate)
2782 		return;
2783 
2784 	last_used = 0;
2785 	write_start = 0;
2786 
2787 	if (!ic->just_formatted) {
2788 		DEBUG_print("reading journal\n");
2789 		rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2790 		if (ic->journal_io)
2791 			DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2792 		if (ic->journal_io) {
2793 			struct journal_completion crypt_comp;
2794 			crypt_comp.ic = ic;
2795 			init_completion(&crypt_comp.comp);
2796 			crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2797 			encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2798 			wait_for_completion(&crypt_comp.comp);
2799 		}
2800 		DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2801 	}
2802 
2803 	if (dm_integrity_failed(ic))
2804 		goto clear_journal;
2805 
2806 	journal_empty = true;
2807 	memset(used_commit_ids, 0, sizeof used_commit_ids);
2808 	memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
2809 	for (i = 0; i < ic->journal_sections; i++) {
2810 		for (j = 0; j < ic->journal_section_sectors; j++) {
2811 			int k;
2812 			struct journal_sector *js = access_journal(ic, i, j);
2813 			k = find_commit_seq(ic, i, j, js->commit_id);
2814 			if (k < 0)
2815 				goto clear_journal;
2816 			used_commit_ids[k] = true;
2817 			max_commit_id_sections[k] = i;
2818 		}
2819 		if (journal_empty) {
2820 			for (j = 0; j < ic->journal_section_entries; j++) {
2821 				struct journal_entry *je = access_journal_entry(ic, i, j);
2822 				if (!journal_entry_is_unused(je)) {
2823 					journal_empty = false;
2824 					break;
2825 				}
2826 			}
2827 		}
2828 	}
2829 
2830 	if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2831 		unused = N_COMMIT_IDS - 1;
2832 		while (unused && !used_commit_ids[unused - 1])
2833 			unused--;
2834 	} else {
2835 		for (unused = 0; unused < N_COMMIT_IDS; unused++)
2836 			if (!used_commit_ids[unused])
2837 				break;
2838 		if (unused == N_COMMIT_IDS) {
2839 			dm_integrity_io_error(ic, "journal commit ids", -EIO);
2840 			goto clear_journal;
2841 		}
2842 	}
2843 	DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2844 		    unused, used_commit_ids[0], used_commit_ids[1],
2845 		    used_commit_ids[2], used_commit_ids[3]);
2846 
2847 	last_used = prev_commit_seq(unused);
2848 	want_commit_seq = prev_commit_seq(last_used);
2849 
2850 	if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2851 		journal_empty = true;
2852 
2853 	write_start = max_commit_id_sections[last_used] + 1;
2854 	if (unlikely(write_start >= ic->journal_sections))
2855 		want_commit_seq = next_commit_seq(want_commit_seq);
2856 	wraparound_section(ic, &write_start);
2857 
2858 	i = write_start;
2859 	for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2860 		for (j = 0; j < ic->journal_section_sectors; j++) {
2861 			struct journal_sector *js = access_journal(ic, i, j);
2862 
2863 			if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2864 				/*
2865 				 * This could be caused by crash during writing.
2866 				 * We won't replay the inconsistent part of the
2867 				 * journal.
2868 				 */
2869 				DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2870 					    i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2871 				goto brk;
2872 			}
2873 		}
2874 		i++;
2875 		if (unlikely(i >= ic->journal_sections))
2876 			want_commit_seq = next_commit_seq(want_commit_seq);
2877 		wraparound_section(ic, &i);
2878 	}
2879 brk:
2880 
2881 	if (!journal_empty) {
2882 		DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
2883 			    write_sections, write_start, want_commit_seq);
2884 		do_journal_write(ic, write_start, write_sections, true);
2885 	}
2886 
2887 	if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
2888 		continue_section = write_start;
2889 		ic->commit_seq = want_commit_seq;
2890 		DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
2891 	} else {
2892 		unsigned s;
2893 		unsigned char erase_seq;
2894 clear_journal:
2895 		DEBUG_print("clearing journal\n");
2896 
2897 		erase_seq = prev_commit_seq(prev_commit_seq(last_used));
2898 		s = write_start;
2899 		init_journal(ic, s, 1, erase_seq);
2900 		s++;
2901 		wraparound_section(ic, &s);
2902 		if (ic->journal_sections >= 2) {
2903 			init_journal(ic, s, ic->journal_sections - 2, erase_seq);
2904 			s += ic->journal_sections - 2;
2905 			wraparound_section(ic, &s);
2906 			init_journal(ic, s, 1, erase_seq);
2907 		}
2908 
2909 		continue_section = 0;
2910 		ic->commit_seq = next_commit_seq(erase_seq);
2911 	}
2912 
2913 	ic->committed_section = continue_section;
2914 	ic->n_committed_sections = 0;
2915 
2916 	ic->uncommitted_section = continue_section;
2917 	ic->n_uncommitted_sections = 0;
2918 
2919 	ic->free_section = continue_section;
2920 	ic->free_section_entry = 0;
2921 	ic->free_sectors = ic->journal_entries;
2922 
2923 	ic->journal_tree_root = RB_ROOT;
2924 	for (i = 0; i < ic->journal_entries; i++)
2925 		init_journal_node(&ic->journal_tree[i]);
2926 }
2927 
2928 static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
2929 {
2930 	DEBUG_print("dm_integrity_enter_synchronous_mode\n");
2931 
2932 	if (ic->mode == 'B') {
2933 		ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
2934 		ic->synchronous_mode = 1;
2935 
2936 		cancel_delayed_work_sync(&ic->bitmap_flush_work);
2937 		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2938 		flush_workqueue(ic->commit_wq);
2939 	}
2940 }
2941 
2942 static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
2943 {
2944 	struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
2945 
2946 	DEBUG_print("dm_integrity_reboot\n");
2947 
2948 	dm_integrity_enter_synchronous_mode(ic);
2949 
2950 	return NOTIFY_DONE;
2951 }
2952 
2953 static void dm_integrity_postsuspend(struct dm_target *ti)
2954 {
2955 	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2956 	int r;
2957 
2958 	WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
2959 
2960 	del_timer_sync(&ic->autocommit_timer);
2961 
2962 	if (ic->recalc_wq)
2963 		drain_workqueue(ic->recalc_wq);
2964 
2965 	if (ic->mode == 'B')
2966 		cancel_delayed_work_sync(&ic->bitmap_flush_work);
2967 
2968 	queue_work(ic->commit_wq, &ic->commit_work);
2969 	drain_workqueue(ic->commit_wq);
2970 
2971 	if (ic->mode == 'J') {
2972 		if (ic->meta_dev)
2973 			queue_work(ic->writer_wq, &ic->writer_work);
2974 		drain_workqueue(ic->writer_wq);
2975 		dm_integrity_flush_buffers(ic, true);
2976 	}
2977 
2978 	if (ic->mode == 'B') {
2979 		dm_integrity_flush_buffers(ic, true);
2980 #if 1
2981 		/* set to 0 to test bitmap replay code */
2982 		init_journal(ic, 0, ic->journal_sections, 0);
2983 		ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
2984 		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2985 		if (unlikely(r))
2986 			dm_integrity_io_error(ic, "writing superblock", r);
2987 #endif
2988 	}
2989 
2990 	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
2991 
2992 	ic->journal_uptodate = true;
2993 }
2994 
2995 static void dm_integrity_resume(struct dm_target *ti)
2996 {
2997 	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2998 	__u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
2999 	int r;
3000 
3001 	DEBUG_print("resume\n");
3002 
3003 	if (ic->provided_data_sectors != old_provided_data_sectors) {
3004 		if (ic->provided_data_sectors > old_provided_data_sectors &&
3005 		    ic->mode == 'B' &&
3006 		    ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
3007 			rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
3008 					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3009 			block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
3010 					ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
3011 			rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3012 					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3013 		}
3014 
3015 		ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3016 		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3017 		if (unlikely(r))
3018 			dm_integrity_io_error(ic, "writing superblock", r);
3019 	}
3020 
3021 	if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
3022 		DEBUG_print("resume dirty_bitmap\n");
3023 		rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
3024 				   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3025 		if (ic->mode == 'B') {
3026 			if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
3027 				block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
3028 				block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
3029 				if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
3030 						     BITMAP_OP_TEST_ALL_CLEAR)) {
3031 					ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3032 					ic->sb->recalc_sector = cpu_to_le64(0);
3033 				}
3034 			} else {
3035 				DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
3036 					    ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
3037 				ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3038 				block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3039 				block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3040 				block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3041 				rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3042 						   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3043 				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3044 				ic->sb->recalc_sector = cpu_to_le64(0);
3045 			}
3046 		} else {
3047 			if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3048 			      block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
3049 				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3050 				ic->sb->recalc_sector = cpu_to_le64(0);
3051 			}
3052 			init_journal(ic, 0, ic->journal_sections, 0);
3053 			replay_journal(ic);
3054 			ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3055 		}
3056 		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3057 		if (unlikely(r))
3058 			dm_integrity_io_error(ic, "writing superblock", r);
3059 	} else {
3060 		replay_journal(ic);
3061 		if (ic->mode == 'B') {
3062 			ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3063 			ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3064 			r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3065 			if (unlikely(r))
3066 				dm_integrity_io_error(ic, "writing superblock", r);
3067 
3068 			block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3069 			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3070 			block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3071 			if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3072 			    le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3073 				block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3074 						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3075 				block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3076 						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3077 				block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3078 						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3079 			}
3080 			rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3081 					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3082 		}
3083 	}
3084 
3085 	DEBUG_print("testing recalc: %x\n", ic->sb->flags);
3086 	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
3087 		__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
3088 		DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
3089 		if (recalc_pos < ic->provided_data_sectors) {
3090 			queue_work(ic->recalc_wq, &ic->recalc_work);
3091 		} else if (recalc_pos > ic->provided_data_sectors) {
3092 			ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
3093 			recalc_write_super(ic);
3094 		}
3095 	}
3096 
3097 	ic->reboot_notifier.notifier_call = dm_integrity_reboot;
3098 	ic->reboot_notifier.next = NULL;
3099 	ic->reboot_notifier.priority = INT_MAX - 1;	/* be notified after md and before hardware drivers */
3100 	WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
3101 
3102 #if 0
3103 	/* set to 1 to stress test synchronous mode */
3104 	dm_integrity_enter_synchronous_mode(ic);
3105 #endif
3106 }
3107 
3108 static void dm_integrity_status(struct dm_target *ti, status_type_t type,
3109 				unsigned status_flags, char *result, unsigned maxlen)
3110 {
3111 	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3112 	unsigned arg_count;
3113 	size_t sz = 0;
3114 
3115 	switch (type) {
3116 	case STATUSTYPE_INFO:
3117 		DMEMIT("%llu %llu",
3118 			(unsigned long long)atomic64_read(&ic->number_of_mismatches),
3119 			ic->provided_data_sectors);
3120 		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3121 			DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
3122 		else
3123 			DMEMIT(" -");
3124 		break;
3125 
3126 	case STATUSTYPE_TABLE: {
3127 		__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
3128 		watermark_percentage += ic->journal_entries / 2;
3129 		do_div(watermark_percentage, ic->journal_entries);
3130 		arg_count = 3;
3131 		arg_count += !!ic->meta_dev;
3132 		arg_count += ic->sectors_per_block != 1;
3133 		arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
3134 		arg_count += ic->discard;
3135 		arg_count += ic->mode == 'J';
3136 		arg_count += ic->mode == 'J';
3137 		arg_count += ic->mode == 'B';
3138 		arg_count += ic->mode == 'B';
3139 		arg_count += !!ic->internal_hash_alg.alg_string;
3140 		arg_count += !!ic->journal_crypt_alg.alg_string;
3141 		arg_count += !!ic->journal_mac_alg.alg_string;
3142 		arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
3143 		DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
3144 		       ic->tag_size, ic->mode, arg_count);
3145 		if (ic->meta_dev)
3146 			DMEMIT(" meta_device:%s", ic->meta_dev->name);
3147 		if (ic->sectors_per_block != 1)
3148 			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3149 		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3150 			DMEMIT(" recalculate");
3151 		if (ic->discard)
3152 			DMEMIT(" allow_discards");
3153 		DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3154 		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3155 		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
3156 		if (ic->mode == 'J') {
3157 			DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
3158 			DMEMIT(" commit_time:%u", ic->autocommit_msec);
3159 		}
3160 		if (ic->mode == 'B') {
3161 			DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
3162 			DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
3163 		}
3164 		if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
3165 			DMEMIT(" fix_padding");
3166 
3167 #define EMIT_ALG(a, n)							\
3168 		do {							\
3169 			if (ic->a.alg_string) {				\
3170 				DMEMIT(" %s:%s", n, ic->a.alg_string);	\
3171 				if (ic->a.key_string)			\
3172 					DMEMIT(":%s", ic->a.key_string);\
3173 			}						\
3174 		} while (0)
3175 		EMIT_ALG(internal_hash_alg, "internal_hash");
3176 		EMIT_ALG(journal_crypt_alg, "journal_crypt");
3177 		EMIT_ALG(journal_mac_alg, "journal_mac");
3178 		break;
3179 	}
3180 	}
3181 }
3182 
3183 static int dm_integrity_iterate_devices(struct dm_target *ti,
3184 					iterate_devices_callout_fn fn, void *data)
3185 {
3186 	struct dm_integrity_c *ic = ti->private;
3187 
3188 	if (!ic->meta_dev)
3189 		return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3190 	else
3191 		return fn(ti, ic->dev, 0, ti->len, data);
3192 }
3193 
3194 static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
3195 {
3196 	struct dm_integrity_c *ic = ti->private;
3197 
3198 	if (ic->sectors_per_block > 1) {
3199 		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3200 		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3201 		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
3202 	}
3203 }
3204 
3205 static void calculate_journal_section_size(struct dm_integrity_c *ic)
3206 {
3207 	unsigned sector_space = JOURNAL_SECTOR_DATA;
3208 
3209 	ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
3210 	ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
3211 					 JOURNAL_ENTRY_ROUNDUP);
3212 
3213 	if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3214 		sector_space -= JOURNAL_MAC_PER_SECTOR;
3215 	ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3216 	ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3217 	ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3218 	ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
3219 }
3220 
3221 static int calculate_device_limits(struct dm_integrity_c *ic)
3222 {
3223 	__u64 initial_sectors;
3224 
3225 	calculate_journal_section_size(ic);
3226 	initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3227 	if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3228 		return -EINVAL;
3229 	ic->initial_sectors = initial_sectors;
3230 
3231 	if (!ic->meta_dev) {
3232 		sector_t last_sector, last_area, last_offset;
3233 
3234 		/* we have to maintain excessive padding for compatibility with existing volumes */
3235 		__u64 metadata_run_padding =
3236 			ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
3237 			(__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
3238 			(__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
3239 
3240 		ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3241 					    metadata_run_padding) >> SECTOR_SHIFT;
3242 		if (!(ic->metadata_run & (ic->metadata_run - 1)))
3243 			ic->log2_metadata_run = __ffs(ic->metadata_run);
3244 		else
3245 			ic->log2_metadata_run = -1;
3246 
3247 		get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3248 		last_sector = get_data_sector(ic, last_area, last_offset);
3249 		if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3250 			return -EINVAL;
3251 	} else {
3252 		__u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3253 		meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3254 				>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3255 		meta_size <<= ic->log2_buffer_sectors;
3256 		if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3257 		    ic->initial_sectors + meta_size > ic->meta_device_sectors)
3258 			return -EINVAL;
3259 		ic->metadata_run = 1;
3260 		ic->log2_metadata_run = 0;
3261 	}
3262 
3263 	return 0;
3264 }
3265 
3266 static void get_provided_data_sectors(struct dm_integrity_c *ic)
3267 {
3268 	if (!ic->meta_dev) {
3269 		int test_bit;
3270 		ic->provided_data_sectors = 0;
3271 		for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3272 			__u64 prev_data_sectors = ic->provided_data_sectors;
3273 
3274 			ic->provided_data_sectors |= (sector_t)1 << test_bit;
3275 			if (calculate_device_limits(ic))
3276 				ic->provided_data_sectors = prev_data_sectors;
3277 		}
3278 	} else {
3279 		ic->provided_data_sectors = ic->data_device_sectors;
3280 		ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
3281 	}
3282 }
3283 
3284 static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
3285 {
3286 	unsigned journal_sections;
3287 	int test_bit;
3288 
3289 	memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3290 	memcpy(ic->sb->magic, SB_MAGIC, 8);
3291 	ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3292 	ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3293 	if (ic->journal_mac_alg.alg_string)
3294 		ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3295 
3296 	calculate_journal_section_size(ic);
3297 	journal_sections = journal_sectors / ic->journal_section_sectors;
3298 	if (!journal_sections)
3299 		journal_sections = 1;
3300 
3301 	if (!ic->meta_dev) {
3302 		if (ic->fix_padding)
3303 			ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
3304 		ic->sb->journal_sections = cpu_to_le32(journal_sections);
3305 		if (!interleave_sectors)
3306 			interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3307 		ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3308 		ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3309 		ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3310 
3311 		get_provided_data_sectors(ic);
3312 		if (!ic->provided_data_sectors)
3313 			return -EINVAL;
3314 	} else {
3315 		ic->sb->log2_interleave_sectors = 0;
3316 
3317 		get_provided_data_sectors(ic);
3318 		if (!ic->provided_data_sectors)
3319 			return -EINVAL;
3320 
3321 try_smaller_buffer:
3322 		ic->sb->journal_sections = cpu_to_le32(0);
3323 		for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
3324 			__u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3325 			__u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
3326 			if (test_journal_sections > journal_sections)
3327 				continue;
3328 			ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3329 			if (calculate_device_limits(ic))
3330 				ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3331 
3332 		}
3333 		if (!le32_to_cpu(ic->sb->journal_sections)) {
3334 			if (ic->log2_buffer_sectors > 3) {
3335 				ic->log2_buffer_sectors--;
3336 				goto try_smaller_buffer;
3337 			}
3338 			return -EINVAL;
3339 		}
3340 	}
3341 
3342 	ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3343 
3344 	sb_set_version(ic);
3345 
3346 	return 0;
3347 }
3348 
3349 static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3350 {
3351 	struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
3352 	struct blk_integrity bi;
3353 
3354 	memset(&bi, 0, sizeof(bi));
3355 	bi.profile = &dm_integrity_profile;
3356 	bi.tuple_size = ic->tag_size;
3357 	bi.tag_size = bi.tuple_size;
3358 	bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
3359 
3360 	blk_integrity_register(disk, &bi);
3361 	blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
3362 }
3363 
3364 static void dm_integrity_free_page_list(struct page_list *pl)
3365 {
3366 	unsigned i;
3367 
3368 	if (!pl)
3369 		return;
3370 	for (i = 0; pl[i].page; i++)
3371 		__free_page(pl[i].page);
3372 	kvfree(pl);
3373 }
3374 
3375 static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
3376 {
3377 	struct page_list *pl;
3378 	unsigned i;
3379 
3380 	pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
3381 	if (!pl)
3382 		return NULL;
3383 
3384 	for (i = 0; i < n_pages; i++) {
3385 		pl[i].page = alloc_page(GFP_KERNEL);
3386 		if (!pl[i].page) {
3387 			dm_integrity_free_page_list(pl);
3388 			return NULL;
3389 		}
3390 		if (i)
3391 			pl[i - 1].next = &pl[i];
3392 	}
3393 	pl[i].page = NULL;
3394 	pl[i].next = NULL;
3395 
3396 	return pl;
3397 }
3398 
3399 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3400 {
3401 	unsigned i;
3402 	for (i = 0; i < ic->journal_sections; i++)
3403 		kvfree(sl[i]);
3404 	kvfree(sl);
3405 }
3406 
3407 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3408 								   struct page_list *pl)
3409 {
3410 	struct scatterlist **sl;
3411 	unsigned i;
3412 
3413 	sl = kvmalloc_array(ic->journal_sections,
3414 			    sizeof(struct scatterlist *),
3415 			    GFP_KERNEL | __GFP_ZERO);
3416 	if (!sl)
3417 		return NULL;
3418 
3419 	for (i = 0; i < ic->journal_sections; i++) {
3420 		struct scatterlist *s;
3421 		unsigned start_index, start_offset;
3422 		unsigned end_index, end_offset;
3423 		unsigned n_pages;
3424 		unsigned idx;
3425 
3426 		page_list_location(ic, i, 0, &start_index, &start_offset);
3427 		page_list_location(ic, i, ic->journal_section_sectors - 1,
3428 				   &end_index, &end_offset);
3429 
3430 		n_pages = (end_index - start_index + 1);
3431 
3432 		s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
3433 				   GFP_KERNEL);
3434 		if (!s) {
3435 			dm_integrity_free_journal_scatterlist(ic, sl);
3436 			return NULL;
3437 		}
3438 
3439 		sg_init_table(s, n_pages);
3440 		for (idx = start_index; idx <= end_index; idx++) {
3441 			char *va = lowmem_page_address(pl[idx].page);
3442 			unsigned start = 0, end = PAGE_SIZE;
3443 			if (idx == start_index)
3444 				start = start_offset;
3445 			if (idx == end_index)
3446 				end = end_offset + (1 << SECTOR_SHIFT);
3447 			sg_set_buf(&s[idx - start_index], va + start, end - start);
3448 		}
3449 
3450 		sl[i] = s;
3451 	}
3452 
3453 	return sl;
3454 }
3455 
3456 static void free_alg(struct alg_spec *a)
3457 {
3458 	kfree_sensitive(a->alg_string);
3459 	kfree_sensitive(a->key);
3460 	memset(a, 0, sizeof *a);
3461 }
3462 
3463 static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
3464 {
3465 	char *k;
3466 
3467 	free_alg(a);
3468 
3469 	a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
3470 	if (!a->alg_string)
3471 		goto nomem;
3472 
3473 	k = strchr(a->alg_string, ':');
3474 	if (k) {
3475 		*k = 0;
3476 		a->key_string = k + 1;
3477 		if (strlen(a->key_string) & 1)
3478 			goto inval;
3479 
3480 		a->key_size = strlen(a->key_string) / 2;
3481 		a->key = kmalloc(a->key_size, GFP_KERNEL);
3482 		if (!a->key)
3483 			goto nomem;
3484 		if (hex2bin(a->key, a->key_string, a->key_size))
3485 			goto inval;
3486 	}
3487 
3488 	return 0;
3489 inval:
3490 	*error = error_inval;
3491 	return -EINVAL;
3492 nomem:
3493 	*error = "Out of memory for an argument";
3494 	return -ENOMEM;
3495 }
3496 
3497 static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
3498 		   char *error_alg, char *error_key)
3499 {
3500 	int r;
3501 
3502 	if (a->alg_string) {
3503 		*hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3504 		if (IS_ERR(*hash)) {
3505 			*error = error_alg;
3506 			r = PTR_ERR(*hash);
3507 			*hash = NULL;
3508 			return r;
3509 		}
3510 
3511 		if (a->key) {
3512 			r = crypto_shash_setkey(*hash, a->key, a->key_size);
3513 			if (r) {
3514 				*error = error_key;
3515 				return r;
3516 			}
3517 		} else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
3518 			*error = error_key;
3519 			return -ENOKEY;
3520 		}
3521 	}
3522 
3523 	return 0;
3524 }
3525 
3526 static int create_journal(struct dm_integrity_c *ic, char **error)
3527 {
3528 	int r = 0;
3529 	unsigned i;
3530 	__u64 journal_pages, journal_desc_size, journal_tree_size;
3531 	unsigned char *crypt_data = NULL, *crypt_iv = NULL;
3532 	struct skcipher_request *req = NULL;
3533 
3534 	ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3535 	ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3536 	ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3537 	ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
3538 
3539 	journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3540 				PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
3541 	journal_desc_size = journal_pages * sizeof(struct page_list);
3542 	if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
3543 		*error = "Journal doesn't fit into memory";
3544 		r = -ENOMEM;
3545 		goto bad;
3546 	}
3547 	ic->journal_pages = journal_pages;
3548 
3549 	ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
3550 	if (!ic->journal) {
3551 		*error = "Could not allocate memory for journal";
3552 		r = -ENOMEM;
3553 		goto bad;
3554 	}
3555 	if (ic->journal_crypt_alg.alg_string) {
3556 		unsigned ivsize, blocksize;
3557 		struct journal_completion comp;
3558 
3559 		comp.ic = ic;
3560 		ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3561 		if (IS_ERR(ic->journal_crypt)) {
3562 			*error = "Invalid journal cipher";
3563 			r = PTR_ERR(ic->journal_crypt);
3564 			ic->journal_crypt = NULL;
3565 			goto bad;
3566 		}
3567 		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3568 		blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3569 
3570 		if (ic->journal_crypt_alg.key) {
3571 			r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3572 						   ic->journal_crypt_alg.key_size);
3573 			if (r) {
3574 				*error = "Error setting encryption key";
3575 				goto bad;
3576 			}
3577 		}
3578 		DEBUG_print("cipher %s, block size %u iv size %u\n",
3579 			    ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3580 
3581 		ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
3582 		if (!ic->journal_io) {
3583 			*error = "Could not allocate memory for journal io";
3584 			r = -ENOMEM;
3585 			goto bad;
3586 		}
3587 
3588 		if (blocksize == 1) {
3589 			struct scatterlist *sg;
3590 
3591 			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3592 			if (!req) {
3593 				*error = "Could not allocate crypt request";
3594 				r = -ENOMEM;
3595 				goto bad;
3596 			}
3597 
3598 			crypt_iv = kzalloc(ivsize, GFP_KERNEL);
3599 			if (!crypt_iv) {
3600 				*error = "Could not allocate iv";
3601 				r = -ENOMEM;
3602 				goto bad;
3603 			}
3604 
3605 			ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
3606 			if (!ic->journal_xor) {
3607 				*error = "Could not allocate memory for journal xor";
3608 				r = -ENOMEM;
3609 				goto bad;
3610 			}
3611 
3612 			sg = kvmalloc_array(ic->journal_pages + 1,
3613 					    sizeof(struct scatterlist),
3614 					    GFP_KERNEL);
3615 			if (!sg) {
3616 				*error = "Unable to allocate sg list";
3617 				r = -ENOMEM;
3618 				goto bad;
3619 			}
3620 			sg_init_table(sg, ic->journal_pages + 1);
3621 			for (i = 0; i < ic->journal_pages; i++) {
3622 				char *va = lowmem_page_address(ic->journal_xor[i].page);
3623 				clear_page(va);
3624 				sg_set_buf(&sg[i], va, PAGE_SIZE);
3625 			}
3626 			sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
3627 
3628 			skcipher_request_set_crypt(req, sg, sg,
3629 						   PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
3630 			init_completion(&comp.comp);
3631 			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3632 			if (do_crypt(true, req, &comp))
3633 				wait_for_completion(&comp.comp);
3634 			kvfree(sg);
3635 			r = dm_integrity_failed(ic);
3636 			if (r) {
3637 				*error = "Unable to encrypt journal";
3638 				goto bad;
3639 			}
3640 			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3641 
3642 			crypto_free_skcipher(ic->journal_crypt);
3643 			ic->journal_crypt = NULL;
3644 		} else {
3645 			unsigned crypt_len = roundup(ivsize, blocksize);
3646 
3647 			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3648 			if (!req) {
3649 				*error = "Could not allocate crypt request";
3650 				r = -ENOMEM;
3651 				goto bad;
3652 			}
3653 
3654 			crypt_iv = kmalloc(ivsize, GFP_KERNEL);
3655 			if (!crypt_iv) {
3656 				*error = "Could not allocate iv";
3657 				r = -ENOMEM;
3658 				goto bad;
3659 			}
3660 
3661 			crypt_data = kmalloc(crypt_len, GFP_KERNEL);
3662 			if (!crypt_data) {
3663 				*error = "Unable to allocate crypt data";
3664 				r = -ENOMEM;
3665 				goto bad;
3666 			}
3667 
3668 			ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3669 			if (!ic->journal_scatterlist) {
3670 				*error = "Unable to allocate sg list";
3671 				r = -ENOMEM;
3672 				goto bad;
3673 			}
3674 			ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3675 			if (!ic->journal_io_scatterlist) {
3676 				*error = "Unable to allocate sg list";
3677 				r = -ENOMEM;
3678 				goto bad;
3679 			}
3680 			ic->sk_requests = kvmalloc_array(ic->journal_sections,
3681 							 sizeof(struct skcipher_request *),
3682 							 GFP_KERNEL | __GFP_ZERO);
3683 			if (!ic->sk_requests) {
3684 				*error = "Unable to allocate sk requests";
3685 				r = -ENOMEM;
3686 				goto bad;
3687 			}
3688 			for (i = 0; i < ic->journal_sections; i++) {
3689 				struct scatterlist sg;
3690 				struct skcipher_request *section_req;
3691 				__u32 section_le = cpu_to_le32(i);
3692 
3693 				memset(crypt_iv, 0x00, ivsize);
3694 				memset(crypt_data, 0x00, crypt_len);
3695 				memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
3696 
3697 				sg_init_one(&sg, crypt_data, crypt_len);
3698 				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
3699 				init_completion(&comp.comp);
3700 				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3701 				if (do_crypt(true, req, &comp))
3702 					wait_for_completion(&comp.comp);
3703 
3704 				r = dm_integrity_failed(ic);
3705 				if (r) {
3706 					*error = "Unable to generate iv";
3707 					goto bad;
3708 				}
3709 
3710 				section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3711 				if (!section_req) {
3712 					*error = "Unable to allocate crypt request";
3713 					r = -ENOMEM;
3714 					goto bad;
3715 				}
3716 				section_req->iv = kmalloc_array(ivsize, 2,
3717 								GFP_KERNEL);
3718 				if (!section_req->iv) {
3719 					skcipher_request_free(section_req);
3720 					*error = "Unable to allocate iv";
3721 					r = -ENOMEM;
3722 					goto bad;
3723 				}
3724 				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
3725 				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
3726 				ic->sk_requests[i] = section_req;
3727 				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
3728 			}
3729 		}
3730 	}
3731 
3732 	for (i = 0; i < N_COMMIT_IDS; i++) {
3733 		unsigned j;
3734 retest_commit_id:
3735 		for (j = 0; j < i; j++) {
3736 			if (ic->commit_ids[j] == ic->commit_ids[i]) {
3737 				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
3738 				goto retest_commit_id;
3739 			}
3740 		}
3741 		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
3742 	}
3743 
3744 	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
3745 	if (journal_tree_size > ULONG_MAX) {
3746 		*error = "Journal doesn't fit into memory";
3747 		r = -ENOMEM;
3748 		goto bad;
3749 	}
3750 	ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
3751 	if (!ic->journal_tree) {
3752 		*error = "Could not allocate memory for journal tree";
3753 		r = -ENOMEM;
3754 	}
3755 bad:
3756 	kfree(crypt_data);
3757 	kfree(crypt_iv);
3758 	skcipher_request_free(req);
3759 
3760 	return r;
3761 }
3762 
3763 /*
3764  * Construct a integrity mapping
3765  *
3766  * Arguments:
3767  *	device
3768  *	offset from the start of the device
3769  *	tag size
3770  *	D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
3771  *	number of optional arguments
3772  *	optional arguments:
3773  *		journal_sectors
3774  *		interleave_sectors
3775  *		buffer_sectors
3776  *		journal_watermark
3777  *		commit_time
3778  *		meta_device
3779  *		block_size
3780  *		sectors_per_bit
3781  *		bitmap_flush_interval
3782  *		internal_hash
3783  *		journal_crypt
3784  *		journal_mac
3785  *		recalculate
3786  */
3787 static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3788 {
3789 	struct dm_integrity_c *ic;
3790 	char dummy;
3791 	int r;
3792 	unsigned extra_args;
3793 	struct dm_arg_set as;
3794 	static const struct dm_arg _args[] = {
3795 		{0, 15, "Invalid number of feature args"},
3796 	};
3797 	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
3798 	bool should_write_sb;
3799 	__u64 threshold;
3800 	unsigned long long start;
3801 	__s8 log2_sectors_per_bitmap_bit = -1;
3802 	__s8 log2_blocks_per_bitmap_bit;
3803 	__u64 bits_in_journal;
3804 	__u64 n_bitmap_bits;
3805 
3806 #define DIRECT_ARGUMENTS	4
3807 
3808 	if (argc <= DIRECT_ARGUMENTS) {
3809 		ti->error = "Invalid argument count";
3810 		return -EINVAL;
3811 	}
3812 
3813 	ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
3814 	if (!ic) {
3815 		ti->error = "Cannot allocate integrity context";
3816 		return -ENOMEM;
3817 	}
3818 	ti->private = ic;
3819 	ti->per_io_data_size = sizeof(struct dm_integrity_io);
3820 	ic->ti = ti;
3821 
3822 	ic->in_progress = RB_ROOT;
3823 	INIT_LIST_HEAD(&ic->wait_list);
3824 	init_waitqueue_head(&ic->endio_wait);
3825 	bio_list_init(&ic->flush_bio_list);
3826 	init_waitqueue_head(&ic->copy_to_journal_wait);
3827 	init_completion(&ic->crypto_backoff);
3828 	atomic64_set(&ic->number_of_mismatches, 0);
3829 	ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
3830 
3831 	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
3832 	if (r) {
3833 		ti->error = "Device lookup failed";
3834 		goto bad;
3835 	}
3836 
3837 	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
3838 		ti->error = "Invalid starting offset";
3839 		r = -EINVAL;
3840 		goto bad;
3841 	}
3842 	ic->start = start;
3843 
3844 	if (strcmp(argv[2], "-")) {
3845 		if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
3846 			ti->error = "Invalid tag size";
3847 			r = -EINVAL;
3848 			goto bad;
3849 		}
3850 	}
3851 
3852 	if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
3853 	    !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
3854 		ic->mode = argv[3][0];
3855 	} else {
3856 		ti->error = "Invalid mode (expecting J, B, D, R)";
3857 		r = -EINVAL;
3858 		goto bad;
3859 	}
3860 
3861 	journal_sectors = 0;
3862 	interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3863 	buffer_sectors = DEFAULT_BUFFER_SECTORS;
3864 	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
3865 	sync_msec = DEFAULT_SYNC_MSEC;
3866 	ic->sectors_per_block = 1;
3867 
3868 	as.argc = argc - DIRECT_ARGUMENTS;
3869 	as.argv = argv + DIRECT_ARGUMENTS;
3870 	r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
3871 	if (r)
3872 		goto bad;
3873 
3874 	while (extra_args--) {
3875 		const char *opt_string;
3876 		unsigned val;
3877 		unsigned long long llval;
3878 		opt_string = dm_shift_arg(&as);
3879 		if (!opt_string) {
3880 			r = -EINVAL;
3881 			ti->error = "Not enough feature arguments";
3882 			goto bad;
3883 		}
3884 		if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
3885 			journal_sectors = val ? val : 1;
3886 		else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
3887 			interleave_sectors = val;
3888 		else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
3889 			buffer_sectors = val;
3890 		else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
3891 			journal_watermark = val;
3892 		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
3893 			sync_msec = val;
3894 		else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
3895 			if (ic->meta_dev) {
3896 				dm_put_device(ti, ic->meta_dev);
3897 				ic->meta_dev = NULL;
3898 			}
3899 			r = dm_get_device(ti, strchr(opt_string, ':') + 1,
3900 					  dm_table_get_mode(ti->table), &ic->meta_dev);
3901 			if (r) {
3902 				ti->error = "Device lookup failed";
3903 				goto bad;
3904 			}
3905 		} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
3906 			if (val < 1 << SECTOR_SHIFT ||
3907 			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
3908 			    (val & (val -1))) {
3909 				r = -EINVAL;
3910 				ti->error = "Invalid block_size argument";
3911 				goto bad;
3912 			}
3913 			ic->sectors_per_block = val >> SECTOR_SHIFT;
3914 		} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
3915 			log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
3916 		} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
3917 			if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
3918 				r = -EINVAL;
3919 				ti->error = "Invalid bitmap_flush_interval argument";
3920 			}
3921 			ic->bitmap_flush_interval = msecs_to_jiffies(val);
3922 		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
3923 			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
3924 					    "Invalid internal_hash argument");
3925 			if (r)
3926 				goto bad;
3927 		} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
3928 			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
3929 					    "Invalid journal_crypt argument");
3930 			if (r)
3931 				goto bad;
3932 		} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
3933 			r = get_alg_and_key(opt_string, &ic->journal_mac_alg,  &ti->error,
3934 					    "Invalid journal_mac argument");
3935 			if (r)
3936 				goto bad;
3937 		} else if (!strcmp(opt_string, "recalculate")) {
3938 			ic->recalculate_flag = true;
3939 		} else if (!strcmp(opt_string, "allow_discards")) {
3940 			ic->discard = true;
3941 		} else if (!strcmp(opt_string, "fix_padding")) {
3942 			ic->fix_padding = true;
3943 		} else {
3944 			r = -EINVAL;
3945 			ti->error = "Invalid argument";
3946 			goto bad;
3947 		}
3948 	}
3949 
3950 	ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
3951 	if (!ic->meta_dev)
3952 		ic->meta_device_sectors = ic->data_device_sectors;
3953 	else
3954 		ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
3955 
3956 	if (!journal_sectors) {
3957 		journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
3958 				      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
3959 	}
3960 
3961 	if (!buffer_sectors)
3962 		buffer_sectors = 1;
3963 	ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
3964 
3965 	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
3966 		    "Invalid internal hash", "Error setting internal hash key");
3967 	if (r)
3968 		goto bad;
3969 
3970 	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
3971 		    "Invalid journal mac", "Error setting journal mac key");
3972 	if (r)
3973 		goto bad;
3974 
3975 	if (!ic->tag_size) {
3976 		if (!ic->internal_hash) {
3977 			ti->error = "Unknown tag size";
3978 			r = -EINVAL;
3979 			goto bad;
3980 		}
3981 		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
3982 	}
3983 	if (ic->tag_size > MAX_TAG_SIZE) {
3984 		ti->error = "Too big tag size";
3985 		r = -EINVAL;
3986 		goto bad;
3987 	}
3988 	if (!(ic->tag_size & (ic->tag_size - 1)))
3989 		ic->log2_tag_size = __ffs(ic->tag_size);
3990 	else
3991 		ic->log2_tag_size = -1;
3992 
3993 	if (ic->mode == 'B' && !ic->internal_hash) {
3994 		r = -EINVAL;
3995 		ti->error = "Bitmap mode can be only used with internal hash";
3996 		goto bad;
3997 	}
3998 
3999 	if (ic->discard && !ic->internal_hash) {
4000 		r = -EINVAL;
4001 		ti->error = "Discard can be only used with internal hash";
4002 		goto bad;
4003 	}
4004 
4005 	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
4006 	ic->autocommit_msec = sync_msec;
4007 	timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
4008 
4009 	ic->io = dm_io_client_create();
4010 	if (IS_ERR(ic->io)) {
4011 		r = PTR_ERR(ic->io);
4012 		ic->io = NULL;
4013 		ti->error = "Cannot allocate dm io";
4014 		goto bad;
4015 	}
4016 
4017 	r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
4018 	if (r) {
4019 		ti->error = "Cannot allocate mempool";
4020 		goto bad;
4021 	}
4022 
4023 	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
4024 					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
4025 	if (!ic->metadata_wq) {
4026 		ti->error = "Cannot allocate workqueue";
4027 		r = -ENOMEM;
4028 		goto bad;
4029 	}
4030 
4031 	/*
4032 	 * If this workqueue were percpu, it would cause bio reordering
4033 	 * and reduced performance.
4034 	 */
4035 	ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
4036 	if (!ic->wait_wq) {
4037 		ti->error = "Cannot allocate workqueue";
4038 		r = -ENOMEM;
4039 		goto bad;
4040 	}
4041 
4042 	ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
4043 					  METADATA_WORKQUEUE_MAX_ACTIVE);
4044 	if (!ic->offload_wq) {
4045 		ti->error = "Cannot allocate workqueue";
4046 		r = -ENOMEM;
4047 		goto bad;
4048 	}
4049 
4050 	ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
4051 	if (!ic->commit_wq) {
4052 		ti->error = "Cannot allocate workqueue";
4053 		r = -ENOMEM;
4054 		goto bad;
4055 	}
4056 	INIT_WORK(&ic->commit_work, integrity_commit);
4057 
4058 	if (ic->mode == 'J' || ic->mode == 'B') {
4059 		ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
4060 		if (!ic->writer_wq) {
4061 			ti->error = "Cannot allocate workqueue";
4062 			r = -ENOMEM;
4063 			goto bad;
4064 		}
4065 		INIT_WORK(&ic->writer_work, integrity_writer);
4066 	}
4067 
4068 	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
4069 	if (!ic->sb) {
4070 		r = -ENOMEM;
4071 		ti->error = "Cannot allocate superblock area";
4072 		goto bad;
4073 	}
4074 
4075 	r = sync_rw_sb(ic, REQ_OP_READ, 0);
4076 	if (r) {
4077 		ti->error = "Error reading superblock";
4078 		goto bad;
4079 	}
4080 	should_write_sb = false;
4081 	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
4082 		if (ic->mode != 'R') {
4083 			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
4084 				r = -EINVAL;
4085 				ti->error = "The device is not initialized";
4086 				goto bad;
4087 			}
4088 		}
4089 
4090 		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
4091 		if (r) {
4092 			ti->error = "Could not initialize superblock";
4093 			goto bad;
4094 		}
4095 		if (ic->mode != 'R')
4096 			should_write_sb = true;
4097 	}
4098 
4099 	if (!ic->sb->version || ic->sb->version > SB_VERSION_4) {
4100 		r = -EINVAL;
4101 		ti->error = "Unknown version";
4102 		goto bad;
4103 	}
4104 	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
4105 		r = -EINVAL;
4106 		ti->error = "Tag size doesn't match the information in superblock";
4107 		goto bad;
4108 	}
4109 	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
4110 		r = -EINVAL;
4111 		ti->error = "Block size doesn't match the information in superblock";
4112 		goto bad;
4113 	}
4114 	if (!le32_to_cpu(ic->sb->journal_sections)) {
4115 		r = -EINVAL;
4116 		ti->error = "Corrupted superblock, journal_sections is 0";
4117 		goto bad;
4118 	}
4119 	/* make sure that ti->max_io_len doesn't overflow */
4120 	if (!ic->meta_dev) {
4121 		if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
4122 		    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
4123 			r = -EINVAL;
4124 			ti->error = "Invalid interleave_sectors in the superblock";
4125 			goto bad;
4126 		}
4127 	} else {
4128 		if (ic->sb->log2_interleave_sectors) {
4129 			r = -EINVAL;
4130 			ti->error = "Invalid interleave_sectors in the superblock";
4131 			goto bad;
4132 		}
4133 	}
4134 	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
4135 		r = -EINVAL;
4136 		ti->error = "Journal mac mismatch";
4137 		goto bad;
4138 	}
4139 
4140 	get_provided_data_sectors(ic);
4141 	if (!ic->provided_data_sectors) {
4142 		r = -EINVAL;
4143 		ti->error = "The device is too small";
4144 		goto bad;
4145 	}
4146 
4147 try_smaller_buffer:
4148 	r = calculate_device_limits(ic);
4149 	if (r) {
4150 		if (ic->meta_dev) {
4151 			if (ic->log2_buffer_sectors > 3) {
4152 				ic->log2_buffer_sectors--;
4153 				goto try_smaller_buffer;
4154 			}
4155 		}
4156 		ti->error = "The device is too small";
4157 		goto bad;
4158 	}
4159 
4160 	if (log2_sectors_per_bitmap_bit < 0)
4161 		log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
4162 	if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
4163 		log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
4164 
4165 	bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
4166 	if (bits_in_journal > UINT_MAX)
4167 		bits_in_journal = UINT_MAX;
4168 	while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
4169 		log2_sectors_per_bitmap_bit++;
4170 
4171 	log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
4172 	ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4173 	if (should_write_sb) {
4174 		ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4175 	}
4176 	n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
4177 				+ (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
4178 	ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
4179 
4180 	if (!ic->meta_dev)
4181 		ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
4182 
4183 	if (ti->len > ic->provided_data_sectors) {
4184 		r = -EINVAL;
4185 		ti->error = "Not enough provided sectors for requested mapping size";
4186 		goto bad;
4187 	}
4188 
4189 
4190 	threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
4191 	threshold += 50;
4192 	do_div(threshold, 100);
4193 	ic->free_sectors_threshold = threshold;
4194 
4195 	DEBUG_print("initialized:\n");
4196 	DEBUG_print("	integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
4197 	DEBUG_print("	journal_entry_size %u\n", ic->journal_entry_size);
4198 	DEBUG_print("	journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
4199 	DEBUG_print("	journal_section_entries %u\n", ic->journal_section_entries);
4200 	DEBUG_print("	journal_section_sectors %u\n", ic->journal_section_sectors);
4201 	DEBUG_print("	journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
4202 	DEBUG_print("	journal_entries %u\n", ic->journal_entries);
4203 	DEBUG_print("	log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
4204 	DEBUG_print("	data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
4205 	DEBUG_print("	initial_sectors 0x%x\n", ic->initial_sectors);
4206 	DEBUG_print("	metadata_run 0x%x\n", ic->metadata_run);
4207 	DEBUG_print("	log2_metadata_run %d\n", ic->log2_metadata_run);
4208 	DEBUG_print("	provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
4209 	DEBUG_print("	log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
4210 	DEBUG_print("	bits_in_journal %llu\n", bits_in_journal);
4211 
4212 	if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
4213 		ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
4214 		ic->sb->recalc_sector = cpu_to_le64(0);
4215 	}
4216 
4217 	if (ic->internal_hash) {
4218 		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
4219 		if (!ic->recalc_wq ) {
4220 			ti->error = "Cannot allocate workqueue";
4221 			r = -ENOMEM;
4222 			goto bad;
4223 		}
4224 		INIT_WORK(&ic->recalc_work, integrity_recalc);
4225 		ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
4226 		if (!ic->recalc_buffer) {
4227 			ti->error = "Cannot allocate buffer for recalculating";
4228 			r = -ENOMEM;
4229 			goto bad;
4230 		}
4231 		ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
4232 						 ic->tag_size, GFP_KERNEL);
4233 		if (!ic->recalc_tags) {
4234 			ti->error = "Cannot allocate tags for recalculating";
4235 			r = -ENOMEM;
4236 			goto bad;
4237 		}
4238 	}
4239 
4240 	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
4241 			1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
4242 	if (IS_ERR(ic->bufio)) {
4243 		r = PTR_ERR(ic->bufio);
4244 		ti->error = "Cannot initialize dm-bufio";
4245 		ic->bufio = NULL;
4246 		goto bad;
4247 	}
4248 	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
4249 
4250 	if (ic->mode != 'R') {
4251 		r = create_journal(ic, &ti->error);
4252 		if (r)
4253 			goto bad;
4254 
4255 	}
4256 
4257 	if (ic->mode == 'B') {
4258 		unsigned i;
4259 		unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
4260 
4261 		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4262 		if (!ic->recalc_bitmap) {
4263 			r = -ENOMEM;
4264 			goto bad;
4265 		}
4266 		ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4267 		if (!ic->may_write_bitmap) {
4268 			r = -ENOMEM;
4269 			goto bad;
4270 		}
4271 		ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
4272 		if (!ic->bbs) {
4273 			r = -ENOMEM;
4274 			goto bad;
4275 		}
4276 		INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
4277 		for (i = 0; i < ic->n_bitmap_blocks; i++) {
4278 			struct bitmap_block_status *bbs = &ic->bbs[i];
4279 			unsigned sector, pl_index, pl_offset;
4280 
4281 			INIT_WORK(&bbs->work, bitmap_block_work);
4282 			bbs->ic = ic;
4283 			bbs->idx = i;
4284 			bio_list_init(&bbs->bio_queue);
4285 			spin_lock_init(&bbs->bio_queue_lock);
4286 
4287 			sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
4288 			pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
4289 			pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
4290 
4291 			bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
4292 		}
4293 	}
4294 
4295 	if (should_write_sb) {
4296 		int r;
4297 
4298 		init_journal(ic, 0, ic->journal_sections, 0);
4299 		r = dm_integrity_failed(ic);
4300 		if (unlikely(r)) {
4301 			ti->error = "Error initializing journal";
4302 			goto bad;
4303 		}
4304 		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
4305 		if (r) {
4306 			ti->error = "Error initializing superblock";
4307 			goto bad;
4308 		}
4309 		ic->just_formatted = true;
4310 	}
4311 
4312 	if (!ic->meta_dev) {
4313 		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
4314 		if (r)
4315 			goto bad;
4316 	}
4317 	if (ic->mode == 'B') {
4318 		unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
4319 		if (!max_io_len)
4320 			max_io_len = 1U << 31;
4321 		DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
4322 		if (!ti->max_io_len || ti->max_io_len > max_io_len) {
4323 			r = dm_set_target_max_io_len(ti, max_io_len);
4324 			if (r)
4325 				goto bad;
4326 		}
4327 	}
4328 
4329 	if (!ic->internal_hash)
4330 		dm_integrity_set(ti, ic);
4331 
4332 	ti->num_flush_bios = 1;
4333 	ti->flush_supported = true;
4334 	if (ic->discard)
4335 		ti->num_discard_bios = 1;
4336 
4337 	return 0;
4338 
4339 bad:
4340 	dm_integrity_dtr(ti);
4341 	return r;
4342 }
4343 
4344 static void dm_integrity_dtr(struct dm_target *ti)
4345 {
4346 	struct dm_integrity_c *ic = ti->private;
4347 
4348 	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
4349 	BUG_ON(!list_empty(&ic->wait_list));
4350 
4351 	if (ic->metadata_wq)
4352 		destroy_workqueue(ic->metadata_wq);
4353 	if (ic->wait_wq)
4354 		destroy_workqueue(ic->wait_wq);
4355 	if (ic->offload_wq)
4356 		destroy_workqueue(ic->offload_wq);
4357 	if (ic->commit_wq)
4358 		destroy_workqueue(ic->commit_wq);
4359 	if (ic->writer_wq)
4360 		destroy_workqueue(ic->writer_wq);
4361 	if (ic->recalc_wq)
4362 		destroy_workqueue(ic->recalc_wq);
4363 	vfree(ic->recalc_buffer);
4364 	kvfree(ic->recalc_tags);
4365 	kvfree(ic->bbs);
4366 	if (ic->bufio)
4367 		dm_bufio_client_destroy(ic->bufio);
4368 	mempool_exit(&ic->journal_io_mempool);
4369 	if (ic->io)
4370 		dm_io_client_destroy(ic->io);
4371 	if (ic->dev)
4372 		dm_put_device(ti, ic->dev);
4373 	if (ic->meta_dev)
4374 		dm_put_device(ti, ic->meta_dev);
4375 	dm_integrity_free_page_list(ic->journal);
4376 	dm_integrity_free_page_list(ic->journal_io);
4377 	dm_integrity_free_page_list(ic->journal_xor);
4378 	dm_integrity_free_page_list(ic->recalc_bitmap);
4379 	dm_integrity_free_page_list(ic->may_write_bitmap);
4380 	if (ic->journal_scatterlist)
4381 		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
4382 	if (ic->journal_io_scatterlist)
4383 		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
4384 	if (ic->sk_requests) {
4385 		unsigned i;
4386 
4387 		for (i = 0; i < ic->journal_sections; i++) {
4388 			struct skcipher_request *req = ic->sk_requests[i];
4389 			if (req) {
4390 				kfree_sensitive(req->iv);
4391 				skcipher_request_free(req);
4392 			}
4393 		}
4394 		kvfree(ic->sk_requests);
4395 	}
4396 	kvfree(ic->journal_tree);
4397 	if (ic->sb)
4398 		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
4399 
4400 	if (ic->internal_hash)
4401 		crypto_free_shash(ic->internal_hash);
4402 	free_alg(&ic->internal_hash_alg);
4403 
4404 	if (ic->journal_crypt)
4405 		crypto_free_skcipher(ic->journal_crypt);
4406 	free_alg(&ic->journal_crypt_alg);
4407 
4408 	if (ic->journal_mac)
4409 		crypto_free_shash(ic->journal_mac);
4410 	free_alg(&ic->journal_mac_alg);
4411 
4412 	kfree(ic);
4413 }
4414 
4415 static struct target_type integrity_target = {
4416 	.name			= "integrity",
4417 	.version		= {1, 6, 0},
4418 	.module			= THIS_MODULE,
4419 	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
4420 	.ctr			= dm_integrity_ctr,
4421 	.dtr			= dm_integrity_dtr,
4422 	.map			= dm_integrity_map,
4423 	.postsuspend		= dm_integrity_postsuspend,
4424 	.resume			= dm_integrity_resume,
4425 	.status			= dm_integrity_status,
4426 	.iterate_devices	= dm_integrity_iterate_devices,
4427 	.io_hints		= dm_integrity_io_hints,
4428 };
4429 
4430 static int __init dm_integrity_init(void)
4431 {
4432 	int r;
4433 
4434 	journal_io_cache = kmem_cache_create("integrity_journal_io",
4435 					     sizeof(struct journal_io), 0, 0, NULL);
4436 	if (!journal_io_cache) {
4437 		DMERR("can't allocate journal io cache");
4438 		return -ENOMEM;
4439 	}
4440 
4441 	r = dm_register_target(&integrity_target);
4442 
4443 	if (r < 0)
4444 		DMERR("register failed %d", r);
4445 
4446 	return r;
4447 }
4448 
4449 static void __exit dm_integrity_exit(void)
4450 {
4451 	dm_unregister_target(&integrity_target);
4452 	kmem_cache_destroy(journal_io_cache);
4453 }
4454 
4455 module_init(dm_integrity_init);
4456 module_exit(dm_integrity_exit);
4457 
4458 MODULE_AUTHOR("Milan Broz");
4459 MODULE_AUTHOR("Mikulas Patocka");
4460 MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
4461 MODULE_LICENSE("GPL");
4462