/* xref: /linux/drivers/md/dm-integrity.c (revision a7f7f6248d9740d710fd6bd190293fe5e16410ac) */
/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_SECTORS_PER_BITMAP_BIT	32768
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	131072
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16
#define RECALC_SECTORS			8192
#define RECALC_WRITE_SUPER		16
#define BITMAP_BLOCK_SIZE		4096	/* don't change it */
#define BITMAP_FLUSH_INTERVAL		(10 * HZ)
#define DISCARD_FILLER			0xf6

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC			"integrt"
#define SB_VERSION_1			1
#define SB_VERSION_2			2
#define SB_VERSION_3			3
#define SB_VERSION_4			4
#define SB_SECTORS			8
#define MAX_SECTORS_PER_BLOCK		8

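/*
 * On-disk superblock, stored in the first SB_SECTORS sectors of the
 * metadata area. provided_data_sectors is the usable device size that
 * userspace tools read back.
 */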
struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__u16 integrity_tag_size;
	__u32 journal_sections;
	__u64 provided_data_sectors;	/* userspace uses this value */
	__u32 flags;
	__u8 log2_sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;
	__u8 pad[2];
	__u64 recalc_sector;
};

#define SB_FLAG_HAVE_JOURNAL_MAC	0x1
#define SB_FLAG_RECALCULATING		0x2
#define SB_FLAG_DIRTY_BITMAP		0x4
#define SB_FLAG_FIXED_PADDING		0x8

#define JOURNAL_ENTRY_ROUNDUP		8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR		8

struct journal_entry {
	union {
		struct {
			__u32 sector_lo;
			__u32 sector_hi;
		} s;
		__u64 sector;
	} u;
	commit_id_t last_bytes[];
	/* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)		((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)		do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x)		do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je)		((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)		do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)		((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)	do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)

#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS	8

#define N_COMMIT_IDS			4

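/*
 * Commit sequence numbers rotate through N_COMMIT_IDS values; these
 * helpers step backward and forward through that cycle (modulo
 * N_COMMIT_IDS).
 */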
static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}

/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned key_size;
};

struct dm_integrity_c {
	struct dm_dev *dev;
	struct dm_dev *meta_dev;
	unsigned tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	unsigned n_bitmap_blocks;

	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;
	struct page_list *recalc_bitmap;
	struct page_list *may_write_bitmap;
	struct bitmap_block_status *bbs;
	unsigned bitmap_flush_interval;
	int synchronous_mode;
	struct bio_list synchronous_bios;
	struct delayed_work bitmap_flush_work;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t data_device_sectors;
	sector_t meta_device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;

	unsigned char mode;

	int failed;

	struct crypto_shash *internal_hash;

	struct dm_target *ti;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	struct list_head wait_list;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;
	struct workqueue_struct *offload_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned committed_section;
	unsigned n_committed_sections;

	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;

	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;

	unsigned free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct workqueue_struct *recalc_wq;
	struct work_struct recalc_work;
	u8 *recalc_buffer;
	u8 *recalc_tags;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool journal_uptodate;
	bool just_formatted;
	bool recalculate_flag;
	bool fix_padding;
	bool discard;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;

	atomic64_t number_of_mismatches;

	struct notifier_block reboot_notifier;
};

struct dm_integrity_range {
	sector_t logical_sector;
	sector_t n_sectors;
	bool waiting;
	union {
		struct rb_node node;
		struct {
			struct task_struct *task;
			struct list_head wait_entry;
		};
	};
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	enum req_opf op;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned metadata_offset;

	atomic_t in_flight;
	blk_status_t bi_status;

	struct completion *completion;

	struct dm_bio_details bio_details;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

struct bitmap_block_status {
	struct work_struct work;
	struct dm_integrity_c *ic;
	unsigned idx;
	unsigned long *bitmap;
	struct bio_list bio_queue;
	spinlock_t bio_queue_lock;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL	32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)	printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
	va_list args;
	va_start(args, msg);
	vprintk(msg, args);
	va_end(args);
	if (len)
		pr_cont(":");
	while (len) {
		pr_cont(" %02x", *bytes);
		bytes++;
		len--;
	}
	pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)	__DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
#endif

static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

362  * DM Integrity profile, protection is performed layer above (dm-crypt)
363  */
364 static const struct blk_integrity_profile dm_integrity_profile = {
365 	.name			= "DM-DIF-EXT-TAG",
366 	.generate_fn		= NULL,
367 	.verify_fn		= NULL,
368 	.prepare_fn		= dm_integrity_prepare,
369 	.complete_fn		= dm_integrity_complete,
370 };
371 
372 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
373 static void integrity_bio_wait(struct work_struct *w);
374 static void dm_integrity_dtr(struct dm_target *ti);
375 
376 static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
377 {
378 	if (err == -EILSEQ)
379 		atomic64_inc(&ic->number_of_mismatches);
380 	if (!cmpxchg(&ic->failed, 0, err))
381 		DMERR("Error on %s: %d", msg, err);
382 }
383 
384 static int dm_integrity_failed(struct dm_integrity_c *ic)
385 {
386 	return READ_ONCE(ic->failed);
387 }
388 
389 static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
390 					  unsigned j, unsigned char seq)
391 {
	/*
	 * Xor the number with the section and sector, so that if a piece of
	 * journal is written in the wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

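/*
 * Split a data sector into (area, offset). With interleaved metadata
 * (no separate meta_dev), the device is divided into areas of
 * 2^log2_interleave_sectors data sectors, each with its own metadata run;
 * with a separate metadata device there is only a single area.
 */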
static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	if (!ic->meta_dev) {
		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
		*area = data_sector >> log2_interleave_sectors;
		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
	} else {
		*area = 0;
		*offset = data_sector;
	}
}

#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)

static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
{
	__u64 ms;
	unsigned mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}

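/*
 * Map (area, offset) back to the physical data sector: skip the
 * interleaved metadata runs of this and all preceding areas, plus the
 * initial sectors reserved at the front of the device and the target
 * start.
 */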
static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	if (ic->meta_dev)
		return offset;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	result += ic->start;

	return result;
}

static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
		ic->sb->version = SB_VERSION_4;
	else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
		ic->sb->version = SB_VERSION_3;
	else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
		ic->sb->version = SB_VERSION_2;
	else
		ic->sb->version = SB_VERSION_1;
}

static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	if (op == REQ_OP_WRITE)
		sb_set_version(ic);

	return dm_io(&io_req, 1, &io_loc, NULL);
}

#define BITMAP_OP_TEST_ALL_SET		0
#define BITMAP_OP_TEST_ALL_CLEAR	1
#define BITMAP_OP_SET			2
#define BITMAP_OP_CLEAR			3

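/*
 * Apply one of the BITMAP_OP_* operations to a range of sectors in an
 * in-memory bitmap kept in a page_list; one bit covers
 * 2^(log2_sectors_per_block + log2_blocks_per_bitmap_bit) sectors.
 * Whole-long and whole-page spans are handled in bulk.
 */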
static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
			    sector_t sector, sector_t n_sectors, int mode)
{
	unsigned long bit, end_bit, this_end_bit, page, end_page;
	unsigned long *data;

	if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
		DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
			sector,
			n_sectors,
			ic->sb->log2_sectors_per_block,
			ic->log2_blocks_per_bitmap_bit,
			mode);
		BUG();
	}

	if (unlikely(!n_sectors))
		return true;

	bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	end_bit = (sector + n_sectors - 1) >>
		(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

	page = bit / (PAGE_SIZE * 8);
	bit %= PAGE_SIZE * 8;

	end_page = end_bit / (PAGE_SIZE * 8);
	end_bit %= PAGE_SIZE * 8;

repeat:
	if (page < end_page) {
		this_end_bit = PAGE_SIZE * 8 - 1;
	} else {
		this_end_bit = end_bit;
	}

	data = lowmem_page_address(bitmap[page].page);

	if (mode == BITMAP_OP_TEST_ALL_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != -1)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (!test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != 0)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = -1;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__set_bit(bit, data);
			bit++;
		}
	} else if (mode == BITMAP_OP_CLEAR) {
		if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
			clear_page(data);
		else while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = 0;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__clear_bit(bit, data);
			bit++;
		}
	} else {
		BUG();
	}

	if (unlikely(page < end_page)) {
		bit = 0;
		page++;
		goto repeat;
	}

	return true;
}

static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
	unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
	unsigned i;

	for (i = 0; i < n_bitmap_pages; i++) {
		unsigned long *dst_data = lowmem_page_address(dst[i].page);
		unsigned long *src_data = lowmem_page_address(src[i].page);
		copy_page(dst_data, src_data);
	}
}

static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
	unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
	return &ic->bbs[bitmap_block];
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
		       function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

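/*
 * The journal is kept in a page_list; these helpers translate a
 * (section, offset-in-section) pair into a page index and a byte offset
 * within that list.
 */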
static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
{
	unsigned sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
{
	unsigned pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	unsigned rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}

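/*
 * Compute the MAC of one journal section: hash the sector numbers of all
 * journal entries in the section, then pad (or truncate) the digest to
 * JOURNAL_MAC_SIZE bytes.
 */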
static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned j, size;

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);
		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[HASH_MAX_DIGESTSIZE];

		if (WARN_ON(size > sizeof(digest))) {
			dm_integrity_io_error(ic, "digest_size", -EINVAL);
			goto err;
		}
		r = crypto_shash_final(desc, digest);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}

static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;
	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}

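/*
 * Encrypt or decrypt journal sections by XOR-ing them with the keystream
 * pages in ic->journal_xor, using the async_tx XOR engine. Each section's
 * MAC is written out before the section is encrypted.
 */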
static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			unsigned n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned dummy;
			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (!n_sections)
				break;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}

static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
	struct journal_completion *comp = req->data;
	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}

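/*
 * Encrypt or decrypt journal sections with the skcipher. Each section has
 * a preallocated request; its IV is reset from a saved copy stored right
 * behind the IV buffer before the request is (re)submitted.
 */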
static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			  unsigned n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			    unsigned n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;
	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
			       unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
		       unsigned n_sections, struct journal_completion *comp)
{
	unsigned sector, n_sectors;

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
}

static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned i;

	io_comp.ic = ic;
	init_completion(&io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
	} else {
		unsigned to_end;
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				reinit_completion(&crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				init_completion(&crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}

static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = REQ_OP_WRITE;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}

static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
}

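/*
 * Ranges of sectors with I/O in progress are tracked in an rb-tree
 * (ic->in_progress). A new range that overlaps an in-flight or waiting
 * range is rejected and its submitter must wait on ic->wait_list.
 */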
static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

	if (likely(check_waiting)) {
		struct dm_integrity_range *range;
		list_for_each_entry(range, &ic->wait_list, wait_entry) {
			if (unlikely(ranges_overlap(range, new_range)))
				return false;
		}
	}

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
			n = &range->node.rb_left;
		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
			n = &range->node.rb_right;
		} else {
			return false;
		}
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	while (unlikely(!list_empty(&ic->wait_list))) {
		struct dm_integrity_range *last_range =
			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
		struct task_struct *last_range_task;
		last_range_task = last_range->task;
		list_del(&last_range->wait_entry);
		if (!add_new_range(ic, last_range, false)) {
			last_range->task = last_range_task;
			list_add(&last_range->wait_entry, &ic->wait_list);
			break;
		}
		last_range->waiting = false;
		wake_up_process(last_range_task);
	}
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	new_range->waiting = true;
	list_add_tail(&new_range->wait_entry, &ic->wait_list);
	new_range->task = current;
	do {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ic->endio_wait.lock);
		io_schedule();
		spin_lock_irq(&ic->endio_wait.lock);
	} while (unlikely(new_range->waiting));
}

static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	if (unlikely(!add_new_range(ic, new_range, true)))
		wait_and_add_new_range(ic, new_range);
}

static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;
		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}

#define NOT_FOUND	(-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned found = NOT_FOUND;
	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);
		if (sector == j->sector) {
			found = j - ic->journal_tree;
		}
		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else {
			n = j->node.rb_right;
		}
	}

	return found;
}

static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}

#define TAG_READ	0
#define TAG_WRITE	1
#define TAG_CMP		2

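/*
 * Read, write or compare tag data in the metadata area through dm-bufio.
 * For TAG_CMP, a mismatch that cannot be explained as discard filler
 * causes the remaining size to be returned as a positive value; 0 means
 * the tags matched.
 */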
static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned *metadata_offset, unsigned total_size, int op)
{
#define MAY_BE_FILLER		1
#define MAY_BE_HASH		2
	unsigned hash_offset = 0;
	unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);

	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			memcpy(dp, tag, to_copy);
			dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
		} else {
			/* e.g.: op == TAG_CMP */

			if (likely(is_power_of_2(ic->tag_size))) {
				if (unlikely(memcmp(dp, tag, to_copy))) {
					if (unlikely(!ic->discard) ||
					    unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL))
						goto thorough_test;
				}
			} else {
				unsigned i, ts;
thorough_test:
				ts = total_size;

				for (i = 0; i < to_copy; i++, ts--) {
					if (unlikely(dp[i] != tag[i]))
						may_be &= ~MAY_BE_HASH;
					if (likely(dp[i] != DISCARD_FILLER))
						may_be &= ~MAY_BE_FILLER;
					hash_offset++;
					if (unlikely(hash_offset == ic->tag_size)) {
						if (unlikely(!may_be)) {
							dm_bufio_release(b);
							return ts;
						}
						hash_offset = 0;
						may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
					}
				}
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}

		if (unlikely(!is_power_of_2(ic->tag_size))) {
			hash_offset = (hash_offset + to_copy) % ic->tag_size;
		}

		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
#undef MAY_BE_FILLER
#undef MAY_BE_HASH
}

static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
{
	int r;
	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);
}

static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);
	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}

static void autocommit_fn(struct timer_list *t)
{
	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}

static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);

	queue_work(ic->commit_wq, &ic->commit_work);
}

static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_status)
		bio->bi_status = errno_to_blk_status(r);
	if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
		unsigned long flags;
		spin_lock_irqsave(&ic->endio_wait.lock, flags);
		bio_list_add(&ic->synchronous_bios, bio);
		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
		return;
	}
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}

static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

		if (unlikely(dio->bi_status) && !bio->bi_status)
			bio->bi_status = dio->bi_status;
		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->offload_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}

static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	dm_bio_restore(&dio->bio_details, bio);
	if (bio->bi_integrity)
		bio->bi_opf |= REQ_INTEGRITY;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}

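/*
 * Compute the internal-hash checksum of one block: hash the little-endian
 * sector number followed by the block data, then zero-pad the digest up
 * to tag_size if the digest is shorter.
 */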
static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__u64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned digest_size;

	req->tfm = ic->internal_hash;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway; the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}

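/*
 * Worker that processes the metadata for one I/O: with an internal hash
 * it computes (for writes) or verifies (for reads) per-block checksums;
 * otherwise it copies tags between the bio integrity payload and the
 * metadata area. Discards write DISCARD_FILLER tags.
 */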
static void integrity_metadata(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;

	int r;

	if (ic->internal_hash) {
		struct bvec_iter iter;
		struct bio_vec bv;
		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		char *checksums;
		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
		char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
		sector_t sector;
		unsigned sectors_to_process;

		if (unlikely(ic->mode == 'R'))
			goto skip_io;

		if (likely(dio->op != REQ_OP_DISCARD))
			checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
					    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		else
			checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		if (!checksums) {
			checksums = checksums_onstack;
			if (WARN_ON(extra_space &&
				    digest_size > sizeof(checksums_onstack))) {
				r = -EINVAL;
				goto error;
			}
		}

		if (unlikely(dio->op == REQ_OP_DISCARD)) {
			sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
			unsigned bi_size = dio->bio_details.bi_iter.bi_size;
			unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
			unsigned max_blocks = max_size / ic->tag_size;
			memset(checksums, DISCARD_FILLER, max_size);

			while (bi_size) {
				unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
				this_step_blocks = min(this_step_blocks, max_blocks);
				r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
							this_step_blocks * ic->tag_size, TAG_WRITE);
				if (unlikely(r)) {
					if (likely(checksums != checksums_onstack))
						kfree(checksums);
					goto error;
				}

				/*if (bi_size < this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block)) {
					printk("BUGG: bi_sector: %llx, bi_size: %u\n", bi_sector, bi_size);
					printk("BUGG: this_step_blocks: %u\n", this_step_blocks);
					BUG();
				}*/
				bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
				bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
			}

			if (likely(checksums != checksums_onstack))
				kfree(checksums);
			goto skip_io;
		}

		sector = dio->range.logical_sector;
		sectors_to_process = dio->range.n_sectors;

		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
			unsigned pos;
			char *mem, *checksums_ptr;

again:
			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
			pos = 0;
			checksums_ptr = checksums;
			do {
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process -= ic->sectors_per_block;
				pos += ic->sectors_per_block << SECTOR_SHIFT;
				sector += ic->sectors_per_block;
			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
			kunmap_atomic(mem);

			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (r > 0) {
					char b[BDEVNAME_SIZE];
					DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", bio_devname(bio, b),
						    (sector - ((r + ic->tag_size - 1) / ic->tag_size)));
					r = -EILSEQ;
					atomic64_inc(&ic->number_of_mismatches);
				}
				if (likely(checksums != checksums_onstack))
					kfree(checksums);
				goto error;
			}

			if (!sectors_to_process)
				break;

			if (unlikely(pos < bv.bv_len)) {
				bv.bv_offset += pos;
				bv.bv_len -= pos;
				goto again;
			}
		}

		if (likely(checksums != checksums_onstack))
			kfree(checksums);
	} else {
		struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;

		if (bip) {
			struct bio_vec biv;
			struct bvec_iter iter;
			unsigned data_to_process = dio->range.n_sectors;
			sector_to_block(ic, data_to_process);
			data_to_process *= ic->tag_size;

			bip_for_each_vec(biv, bip, iter) {
				unsigned char *tag;
				unsigned this_len;

				BUG_ON(PageHighMem(biv.bv_page));
				tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
				if (unlikely(r))
					goto error;
				data_to_process -= this_len;
				if (!data_to_process)
					break;
			}
		}
	}
skip_io:
	dec_in_flight(dio);
	return;
error:
	dio->bi_status = errno_to_blk_status(r);
	dec_in_flight(dio);
}

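/*
 * The target's map function: validate alignment and integrity payload
 * size, translate the sector to the data device layout and hand the bio
 * over to dm_integrity_map_continue().
 */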
static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_integrity_c *ic = ti->private;
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
	struct bio_integrity_payload *bip;

	sector_t area, offset;

	dio->ic = ic;
	dio->bi_status = 0;
	dio->op = bio_op(bio);

	if (unlikely(dio->op == REQ_OP_DISCARD)) {
		if (ti->max_io_len) {
			sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
			unsigned log2_max_io_len = __fls(ti->max_io_len);
			sector_t start_boundary = sec >> log2_max_io_len;
			sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
			if (start_boundary < end_boundary) {
				sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
				dm_accept_partial_bio(bio, len);
			}
		}
	}

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		submit_flush_bio(ic, dio);
		return DM_MAPIO_SUBMITTED;
	}

	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
	if (unlikely(dio->fua)) {
		/*
		 * Don't pass down the FUA flag because we have to flush
		 * the disk cache anyway.
		 */
1725 		bio->bi_opf &= ~REQ_FUA;
1726 	}
1727 	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1728 		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
1729 		      dio->range.logical_sector, bio_sectors(bio),
1730 		      ic->provided_data_sectors);
1731 		return DM_MAPIO_KILL;
1732 	}
1733 	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
1734 		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
1735 		      ic->sectors_per_block,
1736 		      dio->range.logical_sector, bio_sectors(bio));
1737 		return DM_MAPIO_KILL;
1738 	}
1739 
1740 	if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
1741 		struct bvec_iter iter;
1742 		struct bio_vec bv;
1743 		bio_for_each_segment(bv, bio, iter) {
1744 			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1745 				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
1746 					bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1747 				return DM_MAPIO_KILL;
1748 			}
1749 		}
1750 	}
1751 
1752 	bip = bio_integrity(bio);
1753 	if (!ic->internal_hash) {
1754 		if (bip) {
1755 			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1756 			if (ic->log2_tag_size >= 0)
1757 				wanted_tag_size <<= ic->log2_tag_size;
1758 			else
1759 				wanted_tag_size *= ic->tag_size;
1760 			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
1761 				DMERR("Invalid integrity data size %u, expected %u",
1762 				      bip->bip_iter.bi_size, wanted_tag_size);
1763 				return DM_MAPIO_KILL;
1764 			}
1765 		}
1766 	} else {
1767 		if (unlikely(bip != NULL)) {
1768 			DMERR("Unexpected integrity data when using internal hash");
1769 			return DM_MAPIO_KILL;
1770 		}
1771 	}
1772 
1773 	if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
1774 		return DM_MAPIO_KILL;
1775 
1776 	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1777 	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1778 	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
1779 
1780 	dm_integrity_map_continue(dio, true);
1781 	return DM_MAPIO_SUBMITTED;
1782 }
1783 
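/*
 * Copy bio data to or from the in-memory journal entries. Returns true when
 * the bio extends past the supplied entries; the caller then restarts the
 * locking sequence for the remainder.
 */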
1784 static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
1785 				 unsigned journal_section, unsigned journal_entry)
1786 {
1787 	struct dm_integrity_c *ic = dio->ic;
1788 	sector_t logical_sector;
1789 	unsigned n_sectors;
1790 
1791 	logical_sector = dio->range.logical_sector;
1792 	n_sectors = dio->range.n_sectors;
1793 	do {
1794 		struct bio_vec bv = bio_iovec(bio);
1795 		char *mem;
1796 
1797 		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
1798 			bv.bv_len = n_sectors << SECTOR_SHIFT;
1799 		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
1800 		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
1801 retry_kmap:
1802 		mem = kmap_atomic(bv.bv_page);
1803 		if (likely(dio->op == REQ_OP_WRITE))
1804 			flush_dcache_page(bv.bv_page);
1805 
1806 		do {
1807 			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
1808 
1809 			if (unlikely(dio->op == REQ_OP_READ)) {
1810 				struct journal_sector *js;
1811 				char *mem_ptr;
1812 				unsigned s;
1813 
1814 				if (unlikely(journal_entry_is_inprogress(je))) {
1815 					flush_dcache_page(bv.bv_page);
1816 					kunmap_atomic(mem);
1817 
1818 					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1819 					goto retry_kmap;
1820 				}
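				/*
				 * Pairs with the smp_wmb() in
				 * journal_entry_set_sector(): once the entry no
				 * longer reads as in-progress, the journal data
				 * and tag written before it are visible here.
				 */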
1821 				smp_rmb();
1822 				BUG_ON(journal_entry_get_sector(je) != logical_sector);
1823 				js = access_journal_data(ic, journal_section, journal_entry);
1824 				mem_ptr = mem + bv.bv_offset;
1825 				s = 0;
1826 				do {
1827 					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
1828 					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
1829 					js++;
1830 					mem_ptr += 1 << SECTOR_SHIFT;
1831 				} while (++s < ic->sectors_per_block);
1832 #ifdef INTERNAL_VERIFY
1833 				if (ic->internal_hash) {
1834 					char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1835 
1836 					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
1837 					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
1838 						DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
1839 							    logical_sector);
1840 					}
1841 				}
1842 #endif
1843 			}
1844 
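			/*
			 * Without an internal hash the tags travel in the bio
			 * integrity payload: copy them into the journal entry
			 * on write and back into the payload on read. A write
			 * that carries no payload gets a zero-filled tag.
			 */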
1845 			if (!ic->internal_hash) {
1846 				struct bio_integrity_payload *bip = bio_integrity(bio);
1847 				unsigned tag_todo = ic->tag_size;
1848 				char *tag_ptr = journal_entry_tag(ic, je);
1849 
				if (bip) {
					do {
						struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
						unsigned tag_now = min(biv.bv_len, tag_todo);
						char *tag_addr;

						BUG_ON(PageHighMem(biv.bv_page));
						tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
						if (likely(dio->op == REQ_OP_WRITE))
							memcpy(tag_ptr, tag_addr, tag_now);
						else
							memcpy(tag_addr, tag_ptr, tag_now);
						bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
						tag_ptr += tag_now;
						tag_todo -= tag_now;
					} while (unlikely(tag_todo));
				} else if (likely(dio->op == REQ_OP_WRITE)) {
					memset(tag_ptr, 0, tag_todo);
				}
1867 			}
1868 
1869 			if (likely(dio->op == REQ_OP_WRITE)) {
1870 				struct journal_sector *js;
1871 				unsigned s;
1872 
1873 				js = access_journal_data(ic, journal_section, journal_entry);
1874 				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
1875 
1876 				s = 0;
1877 				do {
1878 					je->last_bytes[s] = js[s].commit_id;
1879 				} while (++s < ic->sectors_per_block);
1880 
1881 				if (ic->internal_hash) {
1882 					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1883 					if (unlikely(digest_size > ic->tag_size)) {
1884 						char checksums_onstack[HASH_MAX_DIGESTSIZE];
1885 						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
1886 						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
1887 					} else
1888 						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
1889 				}
1890 
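				/*
				 * Publish the entry: journal_entry_set_sector()
				 * issues the write barrier and overwrites the
				 * in-progress marker in sector_hi.
				 */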
1891 				journal_entry_set_sector(je, logical_sector);
1892 			}
1893 			logical_sector += ic->sectors_per_block;
1894 
1895 			journal_entry++;
1896 			if (unlikely(journal_entry == ic->journal_section_entries)) {
1897 				journal_entry = 0;
1898 				journal_section++;
1899 				wraparound_section(ic, &journal_section);
1900 			}
1901 
1902 			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
1903 		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
1904 
1905 		if (unlikely(dio->op == REQ_OP_READ))
1906 			flush_dcache_page(bv.bv_page);
1907 		kunmap_atomic(mem);
1908 	} while (n_sectors);
1909 
1910 	if (likely(dio->op == REQ_OP_WRITE)) {
1911 		smp_mb();
1912 		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
1913 			wake_up(&ic->copy_to_journal_wait);
1914 		if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
1915 			queue_work(ic->commit_wq, &ic->commit_work);
1916 		} else {
1917 			schedule_autocommit(ic);
1918 		}
1919 	} else {
1920 		remove_range(ic, &dio->range);
1921 	}
1922 
1923 	if (unlikely(bio->bi_iter.bi_size)) {
1924 		sector_t area, offset;
1925 
1926 		dio->range.logical_sector = logical_sector;
1927 		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1928 		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1929 		return true;
1930 	}
1931 
1932 	return false;
1933 }
1934 
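/*
 * The main I/O path behind dm_integrity_map(). When from_map is true we are
 * called from the map function and must not sleep, so any operation that
 * would block is bounced to a workqueue instead.
 */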
1935 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
1936 {
1937 	struct dm_integrity_c *ic = dio->ic;
1938 	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1939 	unsigned journal_section, journal_entry;
1940 	unsigned journal_read_pos;
1941 	struct completion read_comp;
1942 	bool discard_retried = false;
1943 	bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
1944 	if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
1945 		need_sync_io = true;
1946 
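	/*
	 * Synchronous I/O waits for completion below, which is not allowed in
	 * the map path; bounce such bios to the offload workqueue first.
	 */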
1947 	if (need_sync_io && from_map) {
1948 		INIT_WORK(&dio->work, integrity_bio_wait);
1949 		queue_work(ic->offload_wq, &dio->work);
1950 		return;
1951 	}
1952 
1953 lock_retry:
1954 	spin_lock_irq(&ic->endio_wait.lock);
1955 retry:
1956 	if (unlikely(dm_integrity_failed(ic))) {
1957 		spin_unlock_irq(&ic->endio_wait.lock);
1958 		do_endio(ic, bio);
1959 		return;
1960 	}
1961 	dio->range.n_sectors = bio_sectors(bio);
1962 	journal_read_pos = NOT_FOUND;
1963 	if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
1964 		if (dio->op == REQ_OP_WRITE) {
1965 			unsigned next_entry, i, pos;
1966 			unsigned ws, we, range_sectors;
1967 
1968 			dio->range.n_sectors = min(dio->range.n_sectors,
1969 						   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
1970 			if (unlikely(!dio->range.n_sectors)) {
1971 				if (from_map)
1972 					goto offload_to_thread;
1973 				sleep_on_endio_wait(ic);
1974 				goto retry;
1975 			}
1976 			range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
1977 			ic->free_sectors -= range_sectors;
1978 			journal_section = ic->free_section;
1979 			journal_entry = ic->free_section_entry;
1980 
1981 			next_entry = ic->free_section_entry + range_sectors;
1982 			ic->free_section_entry = next_entry % ic->journal_section_entries;
1983 			ic->free_section += next_entry / ic->journal_section_entries;
1984 			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
1985 			wraparound_section(ic, &ic->free_section);
1986 
1987 			pos = journal_section * ic->journal_section_entries + journal_entry;
1988 			ws = journal_section;
1989 			we = journal_entry;
1990 			i = 0;
1991 			do {
1992 				struct journal_entry *je;
1993 
1994 				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
1995 				pos++;
1996 				if (unlikely(pos >= ic->journal_entries))
1997 					pos = 0;
1998 
1999 				je = access_journal_entry(ic, ws, we);
2000 				BUG_ON(!journal_entry_is_unused(je));
2001 				journal_entry_set_inprogress(je);
2002 				we++;
2003 				if (unlikely(we == ic->journal_section_entries)) {
2004 					we = 0;
2005 					ws++;
2006 					wraparound_section(ic, &ws);
2007 				}
2008 			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);
2009 
2010 			spin_unlock_irq(&ic->endio_wait.lock);
2011 			goto journal_read_write;
2012 		} else {
2013 			sector_t next_sector;
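			/*
			 * Reads: if the first block is in the journal, extend
			 * the range over consecutive journalled blocks and
			 * serve it from the journal; otherwise clip the range
			 * so it ends before the next journalled block.
			 */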
2014 			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2015 			if (likely(journal_read_pos == NOT_FOUND)) {
2016 				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
2017 					dio->range.n_sectors = next_sector - dio->range.logical_sector;
2018 			} else {
2019 				unsigned i;
2020 				unsigned jp = journal_read_pos + 1;
2021 				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
2022 					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
2023 						break;
2024 				}
2025 				dio->range.n_sectors = i;
2026 			}
2027 		}
2028 	}
2029 	if (unlikely(!add_new_range(ic, &dio->range, true))) {
2030 		/*
2031 		 * We must not sleep in the request routine because it could
2032 		 * stall bios on current->bio_list.
2033 		 * So, we offload the bio to a workqueue if we have to sleep.
2034 		 */
2035 		if (from_map) {
2036 offload_to_thread:
2037 			spin_unlock_irq(&ic->endio_wait.lock);
2038 			INIT_WORK(&dio->work, integrity_bio_wait);
2039 			queue_work(ic->wait_wq, &dio->work);
2040 			return;
2041 		}
2042 		if (journal_read_pos != NOT_FOUND)
2043 			dio->range.n_sectors = ic->sectors_per_block;
2044 		wait_and_add_new_range(ic, &dio->range);
2045 		/*
2046 		 * wait_and_add_new_range drops the spinlock, so the journal
2047 		 * may have been changed arbitrarily. We need to recheck.
2048 		 * To simplify the code, we restrict I/O size to just one block.
2049 		 */
2050 		if (journal_read_pos != NOT_FOUND) {
2051 			sector_t next_sector;
2052 			unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2053 			if (unlikely(new_pos != journal_read_pos)) {
2054 				remove_range_unlocked(ic, &dio->range);
2055 				goto retry;
2056 			}
2057 		}
2058 	}
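	/*
	 * A discard in journal mode must not overtake journalled data for the
	 * same blocks: if the journal may still cover part of the range, force
	 * a commit and writeback and retry the locking sequence once.
	 */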
2059 	if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
2060 		sector_t next_sector;
2061 		unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2062 		if (unlikely(new_pos != NOT_FOUND) ||
2063 		    unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
2064 			remove_range_unlocked(ic, &dio->range);
2065 			spin_unlock_irq(&ic->endio_wait.lock);
2066 			queue_work(ic->commit_wq, &ic->commit_work);
2067 			flush_workqueue(ic->commit_wq);
2068 			queue_work(ic->writer_wq, &ic->writer_work);
2069 			flush_workqueue(ic->writer_wq);
2070 			discard_retried = true;
2071 			goto lock_retry;
2072 		}
2073 	}
2074 	spin_unlock_irq(&ic->endio_wait.lock);
2075 
2076 	if (unlikely(journal_read_pos != NOT_FOUND)) {
2077 		journal_section = journal_read_pos / ic->journal_section_entries;
2078 		journal_entry = journal_read_pos % ic->journal_section_entries;
2079 		goto journal_read_write;
2080 	}
2081 
2082 	if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
2083 		if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2084 				     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2085 			struct bitmap_block_status *bbs;
2086 
2087 			bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
2088 			spin_lock(&bbs->bio_queue_lock);
2089 			bio_list_add(&bbs->bio_queue, bio);
2090 			spin_unlock(&bbs->bio_queue_lock);
2091 			queue_work(ic->writer_wq, &bbs->work);
2092 			return;
2093 		}
2094 	}
2095 
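	/*
	 * Two references: one dropped when the data bio completes, the other
	 * when the tag work in integrity_metadata() finishes.
	 */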
2096 	dio->in_flight = (atomic_t)ATOMIC_INIT(2);
2097 
2098 	if (need_sync_io) {
2099 		init_completion(&read_comp);
2100 		dio->completion = &read_comp;
2101 	} else
2102 		dio->completion = NULL;
2103 
2104 	dm_bio_record(&dio->bio_details, bio);
2105 	bio_set_dev(bio, ic->dev->bdev);
2106 	bio->bi_integrity = NULL;
2107 	bio->bi_opf &= ~REQ_INTEGRITY;
2108 	bio->bi_end_io = integrity_end_io;
2109 	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
2110 
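	/*
	 * For discards (except in mode 'D') the tags are updated synchronously
	 * and the metadata is flushed before the discard itself goes down, so
	 * only a single in-flight reference remains for the bio.
	 */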
2111 	if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
2112 		integrity_metadata(&dio->work);
2113 		dm_integrity_flush_buffers(ic);
2114 
2115 		dio->in_flight = (atomic_t)ATOMIC_INIT(1);
2116 		dio->completion = NULL;
2117 
2118 		generic_make_request(bio);
2119 
2120 		return;
2121 	}
2122 
2123 	generic_make_request(bio);
2124 
2125 	if (need_sync_io) {
2126 		wait_for_completion_io(&read_comp);
2127 		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
2128 		    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2129 			goto skip_check;
2130 		if (ic->mode == 'B') {
2131 			if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2132 					     dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2133 				goto skip_check;
2134 		}
2135 
2136 		if (likely(!bio->bi_status))
2137 			integrity_metadata(&dio->work);
2138 		else
2139 skip_check:
2140 			dec_in_flight(dio);
2141 
2142 	} else {
2143 		INIT_WORK(&dio->work, integrity_metadata);
2144 		queue_work(ic->metadata_wq, &dio->work);
2145 	}
2146 
2147 	return;
2148 
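	/*
	 * Journal path: __journal_read_write() returns true when the bio is
	 * larger than the journal entries we own; loop back and acquire the
	 * next chunk in that case.
	 */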
2149 journal_read_write:
2150 	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2151 		goto lock_retry;
2152 
2153 	do_endio_flush(ic, dio);
2154 }
2155 
2156 
2157 static void integrity_bio_wait(struct work_struct *w)
2158 {
2159 	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2160 
2161 	dm_integrity_map_continue(dio, false);
2162 }
2163 
2164 static void pad_uncommitted(struct dm_integrity_c *ic)
2165 {
2166 	if (ic->free_section_entry) {
2167 		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2168 		ic->free_section_entry = 0;
2169 		ic->free_section++;
2170 		wraparound_section(ic, &ic->free_section);
2171 		ic->n_uncommitted_sections++;
2172 	}
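	/* every journal entry must be accounted as free, uncommitted, or committed */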
2173 	if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
2174 		    (ic->n_uncommitted_sections + ic->n_committed_sections) *
2175 		    ic->journal_section_entries + ic->free_sectors)) {
2176 		DMCRIT("journal_sections %u, journal_section_entries %u, "
2177 		       "n_uncommitted_sections %u, n_committed_sections %u, "
2178 		       "journal_section_entries %u, free_sectors %u",
2179 		       ic->journal_sections, ic->journal_section_entries,
2180 		       ic->n_uncommitted_sections, ic->n_committed_sections,
2181 		       ic->journal_section_entries, ic->free_sectors);
2182 	}
2183 }
2184 
2185 static void integrity_commit(struct work_struct *w)
2186 {
2187 	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2188 	unsigned commit_start, commit_sections;
2189 	unsigned i, j, n;
2190 	struct bio *flushes;
2191 
2192 	del_timer(&ic->autocommit_timer);
2193 
2194 	spin_lock_irq(&ic->endio_wait.lock);
2195 	flushes = bio_list_get(&ic->flush_bio_list);
2196 	if (unlikely(ic->mode != 'J')) {
2197 		spin_unlock_irq(&ic->endio_wait.lock);
2198 		dm_integrity_flush_buffers(ic);
2199 		goto release_flush_bios;
2200 	}
2201 
2202 	pad_uncommitted(ic);
2203 	commit_start = ic->uncommitted_section;
2204 	commit_sections = ic->n_uncommitted_sections;
2205 	spin_unlock_irq(&ic->endio_wait.lock);
2206 
2207 	if (!commit_sections)
2208 		goto release_flush_bios;
2209 
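	/*
	 * Wait until no entry in these sections is still being copied in, then
	 * stamp every journal sector with the commit id of the current
	 * sequence; replay uses the ids to detect how far a commit got.
	 */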
2210 	i = commit_start;
2211 	for (n = 0; n < commit_sections; n++) {
2212 		for (j = 0; j < ic->journal_section_entries; j++) {
2213 			struct journal_entry *je;
2214 			je = access_journal_entry(ic, i, j);
2215 			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2216 		}
2217 		for (j = 0; j < ic->journal_section_sectors; j++) {
2218 			struct journal_sector *js;
2219 			js = access_journal(ic, i, j);
2220 			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2221 		}
2222 		i++;
2223 		if (unlikely(i >= ic->journal_sections))
2224 			ic->commit_seq = next_commit_seq(ic->commit_seq);
2225 		wraparound_section(ic, &i);
2226 	}
2227 	smp_rmb();
2228 
2229 	write_journal(ic, commit_start, commit_sections);
2230 
2231 	spin_lock_irq(&ic->endio_wait.lock);
2232 	ic->uncommitted_section += commit_sections;
2233 	wraparound_section(ic, &ic->uncommitted_section);
2234 	ic->n_uncommitted_sections -= commit_sections;
2235 	ic->n_committed_sections += commit_sections;
2236 	spin_unlock_irq(&ic->endio_wait.lock);
2237 
2238 	if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2239 		queue_work(ic->writer_wq, &ic->writer_work);
2240 
2241 release_flush_bios:
2242 	while (flushes) {
2243 		struct bio *next = flushes->bi_next;
2244 		flushes->bi_next = NULL;
2245 		do_endio(ic, flushes);
2246 		flushes = next;
2247 	}
2248 }
2249 
2250 static void complete_copy_from_journal(unsigned long error, void *context)
2251 {
2252 	struct journal_io *io = context;
2253 	struct journal_completion *comp = io->comp;
2254 	struct dm_integrity_c *ic = comp->ic;
2255 	remove_range(ic, &io->range);
2256 	mempool_free(io, &ic->journal_io_mempool);
2257 	if (unlikely(error != 0))
2258 		dm_integrity_io_error(ic, "copying from journal", -EIO);
2259 	complete_journal_op(comp);
2260 }
2261 
2262 static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2263 			       struct journal_entry *je)
2264 {
2265 	unsigned s = 0;
2266 	do {
2267 		js->commit_id = je->last_bytes[s];
2268 		js++;
2269 	} while (++s < ic->sectors_per_block);
2270 }
2271 
2272 static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
2273 			     unsigned write_sections, bool from_replay)
2274 {
2275 	unsigned i, j, n;
2276 	struct journal_completion comp;
2277 	struct blk_plug plug;
2278 
2279 	blk_start_plug(&plug);
2280 
2281 	comp.ic = ic;
2282 	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2283 	init_completion(&comp.comp);
2284 
2285 	i = write_start;
2286 	for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2287 #ifndef INTERNAL_VERIFY
2288 		if (unlikely(from_replay))
2289 #endif
2290 			rw_section_mac(ic, i, false);
2291 		for (j = 0; j < ic->journal_section_entries; j++) {
2292 			struct journal_entry *je = access_journal_entry(ic, i, j);
2293 			sector_t sec, area, offset;
2294 			unsigned k, l, next_loop;
2295 			sector_t metadata_block;
2296 			unsigned metadata_offset;
2297 			struct journal_io *io;
2298 
2299 			if (journal_entry_is_unused(je))
2300 				continue;
2301 			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
2302 			sec = journal_entry_get_sector(je);
2303 			if (unlikely(from_replay)) {
2304 				if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
2305 					dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2306 					sec &= ~(sector_t)(ic->sectors_per_block - 1);
2307 				}
2308 			}
2309 			if (unlikely(sec >= ic->provided_data_sectors))
2310 				continue;
2311 			get_area_and_offset(ic, sec, &area, &offset);
2312 			restore_last_bytes(ic, access_journal_data(ic, i, j), je);
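			/*
			 * Coalesce the following entries while they target
			 * physically consecutive blocks in the same area, so a
			 * single copy_from_journal() covers the whole run.
			 */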
2313 			for (k = j + 1; k < ic->journal_section_entries; k++) {
2314 				struct journal_entry *je2 = access_journal_entry(ic, i, k);
2315 				sector_t sec2, area2, offset2;
2316 				if (journal_entry_is_unused(je2))
2317 					break;
2318 				BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
2319 				sec2 = journal_entry_get_sector(je2);
2320 				if (unlikely(sec2 >= ic->provided_data_sectors))
2321 					break;
2322 				get_area_and_offset(ic, sec2, &area2, &offset2);
2323 				if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2324 					break;
2325 				restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2326 			}
2327 			next_loop = k - 1;
2328 
2329 			io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2330 			io->comp = &comp;
2331 			io->range.logical_sector = sec;
2332 			io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2333 
2334 			spin_lock_irq(&ic->endio_wait.lock);
2335 			add_new_range_and_wait(ic, &io->range);
2336 
2337 			if (likely(!from_replay)) {
2338 				struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2339 
2340 				/* don't write if there is a newer committed sector */
2341 				while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2342 					struct journal_entry *je2 = access_journal_entry(ic, i, j);
2343 
2344 					journal_entry_set_unused(je2);
2345 					remove_journal_node(ic, &section_node[j]);
2346 					j++;
2347 					sec += ic->sectors_per_block;
2348 					offset += ic->sectors_per_block;
2349 				}
2350 				while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2351 					struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2352 
2353 					journal_entry_set_unused(je2);
2354 					remove_journal_node(ic, &section_node[k - 1]);
2355 					k--;
2356 				}
2357 				if (j == k) {
2358 					remove_range_unlocked(ic, &io->range);
2359 					spin_unlock_irq(&ic->endio_wait.lock);
2360 					mempool_free(io, &ic->journal_io_mempool);
2361 					goto skip_io;
2362 				}
2363 				for (l = j; l < k; l++) {
2364 					remove_journal_node(ic, &section_node[l]);
2365 				}
2366 			}
2367 			spin_unlock_irq(&ic->endio_wait.lock);
2368 
2369 			metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2370 			for (l = j; l < k; l++) {
2371 				int r;
2372 				struct journal_entry *je2 = access_journal_entry(ic, i, l);
2373 
2374 				if (
2375 #ifndef INTERNAL_VERIFY
2376 				    unlikely(from_replay) &&
2377 #endif
2378 				    ic->internal_hash) {
2379 					char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2380 
2381 					integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2382 								  (char *)access_journal_data(ic, i, l), test_tag);
2383 					if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
2384 						dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2385 				}
2386 
2387 				journal_entry_set_unused(je2);
2388 				r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2389 							ic->tag_size, TAG_WRITE);
2390 				if (unlikely(r)) {
2391 					dm_integrity_io_error(ic, "writing tags", r);
2392 				}
2393 			}
2394 
2395 			atomic_inc(&comp.in_flight);
2396 			copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2397 					  (k - j) << ic->sb->log2_sectors_per_block,
2398 					  get_data_sector(ic, area, offset),
2399 					  complete_copy_from_journal, io);
2400 skip_io:
2401 			j = next_loop;
2402 		}
2403 	}
2404 
2405 	dm_bufio_write_dirty_buffers_async(ic->bufio);
2406 
2407 	blk_finish_plug(&plug);
2408 
2409 	complete_journal_op(&comp);
2410 	wait_for_completion_io(&comp.comp);
2411 
2412 	dm_integrity_flush_buffers(ic);
2413 }
2414 
2415 static void integrity_writer(struct work_struct *w)
2416 {
2417 	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2418 	unsigned write_start, write_sections;
2419 
2420 	unsigned prev_free_sectors;
2421 
2422 	/* the following test is not strictly needed, but skipping the write here exercises the replay code */
2423 	if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
2424 		return;
2425 
2426 	spin_lock_irq(&ic->endio_wait.lock);
2427 	write_start = ic->committed_section;
2428 	write_sections = ic->n_committed_sections;
2429 	spin_unlock_irq(&ic->endio_wait.lock);
2430 
2431 	if (!write_sections)
2432 		return;
2433 
2434 	do_journal_write(ic, write_start, write_sections, false);
2435 
2436 	spin_lock_irq(&ic->endio_wait.lock);
2437 
2438 	ic->committed_section += write_sections;
2439 	wraparound_section(ic, &ic->committed_section);
2440 	ic->n_committed_sections -= write_sections;
2441 
2442 	prev_free_sectors = ic->free_sectors;
2443 	ic->free_sectors += write_sections * ic->journal_section_entries;
2444 	if (unlikely(!prev_free_sectors))
2445 		wake_up_locked(&ic->endio_wait);
2446 
2447 	spin_unlock_irq(&ic->endio_wait.lock);
2448 }
2449 
2450 static void recalc_write_super(struct dm_integrity_c *ic)
2451 {
2452 	int r;
2453 
2454 	dm_integrity_flush_buffers(ic);
2455 	if (dm_integrity_failed(ic))
2456 		return;
2457 
2458 	r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
2459 	if (unlikely(r))
2460 		dm_integrity_io_error(ic, "writing superblock", r);
2461 }
2462 
2463 static void integrity_recalc(struct work_struct *w)
2464 {
2465 	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2466 	struct dm_integrity_range range;
2467 	struct dm_io_request io_req;
2468 	struct dm_io_region io_loc;
2469 	sector_t area, offset;
2470 	sector_t metadata_block;
2471 	unsigned metadata_offset;
2472 	sector_t logical_sector, n_sectors;
2473 	__u8 *t;
2474 	unsigned i;
2475 	int r;
2476 	unsigned super_counter = 0;
2477 
2478 	DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2479 
2480 	spin_lock_irq(&ic->endio_wait.lock);
2481 
2482 next_chunk:
2483 
2484 	if (unlikely(dm_suspended(ic->ti)))
2485 		goto unlock_ret;
2486 
2487 	range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2488 	if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2489 		if (ic->mode == 'B') {
2490 			DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2491 			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2492 		}
2493 		goto unlock_ret;
2494 	}
2495 
2496 	get_area_and_offset(ic, range.logical_sector, &area, &offset);
2497 	range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
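	/* with interleaved metadata, never let a chunk cross an interleave boundary */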
2498 	if (!ic->meta_dev)
2499 		range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
2500 
2501 	add_new_range_and_wait(ic, &range);
2502 	spin_unlock_irq(&ic->endio_wait.lock);
2503 	logical_sector = range.logical_sector;
2504 	n_sectors = range.n_sectors;
2505 
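	/*
	 * In bitmap mode only blocks whose recalc bit is still set need work:
	 * skip the chunk if it is all clear, otherwise trim already-clear
	 * blocks from both ends.
	 */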
2506 	if (ic->mode == 'B') {
2507 		if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
2508 			goto advance_and_next;
2509 		}
2510 		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2511 				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2512 			logical_sector += ic->sectors_per_block;
2513 			n_sectors -= ic->sectors_per_block;
2514 			cond_resched();
2515 		}
2516 		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2517 				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2518 			n_sectors -= ic->sectors_per_block;
2519 			cond_resched();
2520 		}
2521 		get_area_and_offset(ic, logical_sector, &area, &offset);
2522 	}
2523 
2524 	DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
2525 
2526 	if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2527 		recalc_write_super(ic);
2528 		if (ic->mode == 'B') {
2529 			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2530 		}
2531 		super_counter = 0;
2532 	}
2533 
2534 	if (unlikely(dm_integrity_failed(ic)))
2535 		goto err;
2536 
2537 	io_req.bi_op = REQ_OP_READ;
2538 	io_req.bi_op_flags = 0;
2539 	io_req.mem.type = DM_IO_VMA;
2540 	io_req.mem.ptr.addr = ic->recalc_buffer;
2541 	io_req.notify.fn = NULL;
2542 	io_req.client = ic->io;
2543 	io_loc.bdev = ic->dev->bdev;
2544 	io_loc.sector = get_data_sector(ic, area, offset);
2545 	io_loc.count = n_sectors;
2546 
2547 	r = dm_io(&io_req, 1, &io_loc, NULL);
2548 	if (unlikely(r)) {
2549 		dm_integrity_io_error(ic, "reading data", r);
2550 		goto err;
2551 	}
2552 
2553 	t = ic->recalc_tags;
2554 	for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2555 		integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
2556 		t += ic->tag_size;
2557 	}
2558 
2559 	metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2560 
2561 	r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
2562 	if (unlikely(r)) {
2563 		dm_integrity_io_error(ic, "writing tags", r);
2564 		goto err;
2565 	}
2566 
2567 advance_and_next:
2568 	cond_resched();
2569 
2570 	spin_lock_irq(&ic->endio_wait.lock);
2571 	remove_range_unlocked(ic, &range);
2572 	ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2573 	goto next_chunk;
2574 
2575 err:
2576 	remove_range(ic, &range);
2577 	return;
2578 
2579 unlock_ret:
2580 	spin_unlock_irq(&ic->endio_wait.lock);
2581 
2582 	recalc_write_super(ic);
2583 }
2584 
2585 static void bitmap_block_work(struct work_struct *w)
2586 {
2587 	struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
2588 	struct dm_integrity_c *ic = bbs->ic;
2589 	struct bio *bio;
2590 	struct bio_list bio_queue;
2591 	struct bio_list waiting;
2592 
2593 	bio_list_init(&waiting);
2594 
2595 	spin_lock(&bbs->bio_queue_lock);
2596 	bio_queue = bbs->bio_queue;
2597 	bio_list_init(&bbs->bio_queue);
2598 	spin_unlock(&bbs->bio_queue_lock);
2599 
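	/*
	 * Bios whose bits are already set in may_write_bitmap may proceed at
	 * once; for the rest, set the bits in the in-memory bitmap, write that
	 * bitmap block out with FUA, and only then release the bios.
	 */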
2600 	while ((bio = bio_list_pop(&bio_queue))) {
2601 		struct dm_integrity_io *dio;
2602 
2603 		dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2604 
2605 		if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2606 				    dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2607 			remove_range(ic, &dio->range);
2608 			INIT_WORK(&dio->work, integrity_bio_wait);
2609 			queue_work(ic->offload_wq, &dio->work);
2610 		} else {
2611 			block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2612 					dio->range.n_sectors, BITMAP_OP_SET);
2613 			bio_list_add(&waiting, bio);
2614 		}
2615 	}
2616 
2617 	if (bio_list_empty(&waiting))
2618 		return;
2619 
2620 	rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
2621 			   bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
2622 			   BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
2623 
2624 	while ((bio = bio_list_pop(&waiting))) {
2625 		struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2626 
2627 		block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2628 				dio->range.n_sectors, BITMAP_OP_SET);
2629 
2630 		remove_range(ic, &dio->range);
2631 		INIT_WORK(&dio->work, integrity_bio_wait);
2632 		queue_work(ic->offload_wq, &dio->work);
2633 	}
2634 
2635 	queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2636 }
2637 
2638 static void bitmap_flush_work(struct work_struct *work)
2639 {
2640 	struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2641 	struct dm_integrity_range range;
2642 	unsigned long limit;
2643 	struct bio *bio;
2644 
2645 	dm_integrity_flush_buffers(ic);
2646 
2647 	range.logical_sector = 0;
2648 	range.n_sectors = ic->provided_data_sectors;
2649 
2650 	spin_lock_irq(&ic->endio_wait.lock);
2651 	add_new_range_and_wait(ic, &range);
2652 	spin_unlock_irq(&ic->endio_wait.lock);
2653 
2654 	dm_integrity_flush_buffers(ic);
2655 	if (ic->meta_dev)
2656 		blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);
2657 
2658 	limit = ic->provided_data_sectors;
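	/*
	 * Only the already-recalculated prefix of the bitmaps may be cleared;
	 * round the recalculation position down to a whole bitmap bit.
	 */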
2659 	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2660 		limit = le64_to_cpu(ic->sb->recalc_sector)
2661 			>> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2662 			<< (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2663 	}
2664 	/*DEBUG_print("zeroing journal\n");*/
2665 	block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2666 	block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2667 
2668 	rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2669 			   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2670 
2671 	spin_lock_irq(&ic->endio_wait.lock);
2672 	remove_range_unlocked(ic, &range);
2673 	while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2674 		bio_endio(bio);
2675 		spin_unlock_irq(&ic->endio_wait.lock);
2676 		spin_lock_irq(&ic->endio_wait.lock);
2677 	}
2678 	spin_unlock_irq(&ic->endio_wait.lock);
2679 }
2680 
2681 
2682 static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2683 			 unsigned n_sections, unsigned char commit_seq)
2684 {
2685 	unsigned i, j, n;
2686 
2687 	if (!n_sections)
2688 		return;
2689 
2690 	for (n = 0; n < n_sections; n++) {
2691 		i = start_section + n;
2692 		wraparound_section(ic, &i);
2693 		for (j = 0; j < ic->journal_section_sectors; j++) {
2694 			struct journal_sector *js = access_journal(ic, i, j);
2695 			memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
2696 			js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2697 		}
2698 		for (j = 0; j < ic->journal_section_entries; j++) {
2699 			struct journal_entry *je = access_journal_entry(ic, i, j);
2700 			journal_entry_set_unused(je);
2701 		}
2702 	}
2703 
2704 	write_journal(ic, start_section, n_sections);
2705 }
2706 
2707 static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2708 {
2709 	unsigned char k;
2710 	for (k = 0; k < N_COMMIT_IDS; k++) {
2711 		if (dm_integrity_commit_id(ic, i, j, k) == id)
2712 			return k;
2713 	}
2714 	dm_integrity_io_error(ic, "journal commit id", -EIO);
2715 	return -EIO;
2716 }
2717 
2718 static void replay_journal(struct dm_integrity_c *ic)
2719 {
2720 	unsigned i, j;
2721 	bool used_commit_ids[N_COMMIT_IDS];
2722 	unsigned max_commit_id_sections[N_COMMIT_IDS];
2723 	unsigned write_start, write_sections;
2724 	unsigned continue_section;
2725 	bool journal_empty;
2726 	unsigned char unused, last_used, want_commit_seq;
2727 
2728 	if (ic->mode == 'R')
2729 		return;
2730 
2731 	if (ic->journal_uptodate)
2732 		return;
2733 
2734 	last_used = 0;
2735 	write_start = 0;
2736 
2737 	if (!ic->just_formatted) {
2738 		DEBUG_print("reading journal\n");
2739 		rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2740 		if (ic->journal_io)
2741 			DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2742 		if (ic->journal_io) {
2743 			struct journal_completion crypt_comp;
2744 			crypt_comp.ic = ic;
2745 			init_completion(&crypt_comp.comp);
2746 			crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2747 			encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2748 			wait_for_completion(&crypt_comp.comp);
2749 		}
2750 		DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2751 	}
2752 
2753 	if (dm_integrity_failed(ic))
2754 		goto clear_journal;
2755 
2756 	journal_empty = true;
2757 	memset(used_commit_ids, 0, sizeof(used_commit_ids));
2758 	memset(max_commit_id_sections, 0, sizeof(max_commit_id_sections));
2759 	for (i = 0; i < ic->journal_sections; i++) {
2760 		for (j = 0; j < ic->journal_section_sectors; j++) {
2761 			int k;
2762 			struct journal_sector *js = access_journal(ic, i, j);
2763 			k = find_commit_seq(ic, i, j, js->commit_id);
2764 			if (k < 0)
2765 				goto clear_journal;
2766 			used_commit_ids[k] = true;
2767 			max_commit_id_sections[k] = i;
2768 		}
2769 		if (journal_empty) {
2770 			for (j = 0; j < ic->journal_section_entries; j++) {
2771 				struct journal_entry *je = access_journal_entry(ic, i, j);
2772 				if (!journal_entry_is_unused(je)) {
2773 					journal_empty = false;
2774 					break;
2775 				}
2776 			}
2777 		}
2778 	}
2779 
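	/*
	 * Commit sequence numbers cycle through N_COMMIT_IDS values. Find a
	 * seq that never reached the disk: the last seq actually used precedes
	 * it, and the sections that may need replaying were written with the
	 * seq before that.
	 */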
2780 	if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2781 		unused = N_COMMIT_IDS - 1;
2782 		while (unused && !used_commit_ids[unused - 1])
2783 			unused--;
2784 	} else {
2785 		for (unused = 0; unused < N_COMMIT_IDS; unused++)
2786 			if (!used_commit_ids[unused])
2787 				break;
2788 		if (unused == N_COMMIT_IDS) {
2789 			dm_integrity_io_error(ic, "journal commit ids", -EIO);
2790 			goto clear_journal;
2791 		}
2792 	}
2793 	DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2794 		    unused, used_commit_ids[0], used_commit_ids[1],
2795 		    used_commit_ids[2], used_commit_ids[3]);
2796 
2797 	last_used = prev_commit_seq(unused);
2798 	want_commit_seq = prev_commit_seq(last_used);
2799 
2800 	if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2801 		journal_empty = true;
2802 
2803 	write_start = max_commit_id_sections[last_used] + 1;
2804 	if (unlikely(write_start >= ic->journal_sections))
2805 		want_commit_seq = next_commit_seq(want_commit_seq);
2806 	wraparound_section(ic, &write_start);
2807 
2808 	i = write_start;
2809 	for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2810 		for (j = 0; j < ic->journal_section_sectors; j++) {
2811 			struct journal_sector *js = access_journal(ic, i, j);
2812 
2813 			if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2814 				/*
2815 				 * This could be caused by a crash during writing.
2816 				 * We won't replay the inconsistent part of the
2817 				 * journal.
2818 				 */
2819 				DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2820 					    i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2821 				goto brk;
2822 			}
2823 		}
2824 		i++;
2825 		if (unlikely(i >= ic->journal_sections))
2826 			want_commit_seq = next_commit_seq(want_commit_seq);
2827 		wraparound_section(ic, &i);
2828 	}
2829 brk:
2830 
2831 	if (!journal_empty) {
2832 		DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
2833 			    write_sections, write_start, want_commit_seq);
2834 		do_journal_write(ic, write_start, write_sections, true);
2835 	}
2836 
2837 	if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
2838 		continue_section = write_start;
2839 		ic->commit_seq = want_commit_seq;
2840 		DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
2841 	} else {
2842 		unsigned s;
2843 		unsigned char erase_seq;
2844 clear_journal:
2845 		DEBUG_print("clearing journal\n");
2846 
2847 		erase_seq = prev_commit_seq(prev_commit_seq(last_used));
2848 		s = write_start;
2849 		init_journal(ic, s, 1, erase_seq);
2850 		s++;
2851 		wraparound_section(ic, &s);
2852 		if (ic->journal_sections >= 2) {
2853 			init_journal(ic, s, ic->journal_sections - 2, erase_seq);
2854 			s += ic->journal_sections - 2;
2855 			wraparound_section(ic, &s);
2856 			init_journal(ic, s, 1, erase_seq);
2857 		}
2858 
2859 		continue_section = 0;
2860 		ic->commit_seq = next_commit_seq(erase_seq);
2861 	}
2862 
2863 	ic->committed_section = continue_section;
2864 	ic->n_committed_sections = 0;
2865 
2866 	ic->uncommitted_section = continue_section;
2867 	ic->n_uncommitted_sections = 0;
2868 
2869 	ic->free_section = continue_section;
2870 	ic->free_section_entry = 0;
2871 	ic->free_sectors = ic->journal_entries;
2872 
2873 	ic->journal_tree_root = RB_ROOT;
2874 	for (i = 0; i < ic->journal_entries; i++)
2875 		init_journal_node(&ic->journal_tree[i]);
2876 }
2877 
2878 static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
2879 {
2880 	DEBUG_print("dm_integrity_enter_synchronous_mode\n");
2881 
2882 	if (ic->mode == 'B') {
2883 		ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
2884 		ic->synchronous_mode = 1;
2885 
2886 		cancel_delayed_work_sync(&ic->bitmap_flush_work);
2887 		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2888 		flush_workqueue(ic->commit_wq);
2889 	}
2890 }
2891 
2892 static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
2893 {
2894 	struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
2895 
2896 	DEBUG_print("dm_integrity_reboot\n");
2897 
2898 	dm_integrity_enter_synchronous_mode(ic);
2899 
2900 	return NOTIFY_DONE;
2901 }
2902 
2903 static void dm_integrity_postsuspend(struct dm_target *ti)
2904 {
2905 	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2906 	int r;
2907 
2908 	WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
2909 
2910 	del_timer_sync(&ic->autocommit_timer);
2911 
2912 	if (ic->recalc_wq)
2913 		drain_workqueue(ic->recalc_wq);
2914 
2915 	if (ic->mode == 'B')
2916 		cancel_delayed_work_sync(&ic->bitmap_flush_work);
2917 
2918 	queue_work(ic->commit_wq, &ic->commit_work);
2919 	drain_workqueue(ic->commit_wq);
2920 
2921 	if (ic->mode == 'J') {
2922 		if (ic->meta_dev)
2923 			queue_work(ic->writer_wq, &ic->writer_work);
2924 		drain_workqueue(ic->writer_wq);
2925 		dm_integrity_flush_buffers(ic);
2926 	}
2927 
2928 	if (ic->mode == 'B') {
2929 		dm_integrity_flush_buffers(ic);
2930 #if 1
2931 		/* set to 0 to test bitmap replay code */
2932 		init_journal(ic, 0, ic->journal_sections, 0);
2933 		ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
2934 		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2935 		if (unlikely(r))
2936 			dm_integrity_io_error(ic, "writing superblock", r);
2937 #endif
2938 	}
2939 
2940 	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
2941 
2942 	ic->journal_uptodate = true;
2943 }
2944 
2945 static void dm_integrity_resume(struct dm_target *ti)
2946 {
2947 	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2948 	__u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
2949 	int r;
2950 
2951 	DEBUG_print("resume\n");
2952 
2953 	if (ic->provided_data_sectors != old_provided_data_sectors) {
2954 		if (ic->provided_data_sectors > old_provided_data_sectors &&
2955 		    ic->mode == 'B' &&
2956 		    ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
2957 			rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
2958 					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2959 			block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
2960 					ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
2961 			rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2962 					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2963 		}
2964 
2965 		ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
2966 		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2967 		if (unlikely(r))
2968 			dm_integrity_io_error(ic, "writing superblock", r);
2969 	}
2970 
2971 	if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
2972 		DEBUG_print("resume dirty_bitmap\n");
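		/*
		 * The device was not shut down cleanly in bitmap mode: regions
		 * marked in the on-disk bitmap may have data and tags out of
		 * sync and must be recalculated.
		 */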
2973 		rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
2974 				   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2975 		if (ic->mode == 'B') {
2976 			if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
2977 				block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
2978 				block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
2979 				if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
2980 						     BITMAP_OP_TEST_ALL_CLEAR)) {
2981 					ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
2982 					ic->sb->recalc_sector = cpu_to_le64(0);
2983 				}
2984 			} else {
2985 				DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
2986 					    ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
2987 				ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
2988 				block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
2989 				block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
2990 				block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
2991 				rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2992 						   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2993 				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
2994 				ic->sb->recalc_sector = cpu_to_le64(0);
2995 			}
2996 		} else {
2997 			if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
2998 			      block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
2999 				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3000 				ic->sb->recalc_sector = cpu_to_le64(0);
3001 			}
3002 			init_journal(ic, 0, ic->journal_sections, 0);
3003 			replay_journal(ic);
3004 			ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3005 		}
3006 		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3007 		if (unlikely(r))
3008 			dm_integrity_io_error(ic, "writing superblock", r);
3009 	} else {
3010 		replay_journal(ic);
3011 		if (ic->mode == 'B') {
3012 			ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3013 			ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3014 			r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3015 			if (unlikely(r))
3016 				dm_integrity_io_error(ic, "writing superblock", r);
3017 
3018 			block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3019 			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3020 			block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3021 			if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3022 			    le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3023 				block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3024 						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3025 				block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3026 						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3027 				block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3028 						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3029 			}
3030 			rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3031 					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3032 		}
3033 	}
3034 
3035 	DEBUG_print("testing recalc: %x\n", ic->sb->flags);
3036 	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
3037 		__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
3038 		DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
3039 		if (recalc_pos < ic->provided_data_sectors) {
3040 			queue_work(ic->recalc_wq, &ic->recalc_work);
3041 		} else if (recalc_pos > ic->provided_data_sectors) {
3042 			ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
3043 			recalc_write_super(ic);
3044 		}
3045 	}
3046 
3047 	ic->reboot_notifier.notifier_call = dm_integrity_reboot;
3048 	ic->reboot_notifier.next = NULL;
3049 	ic->reboot_notifier.priority = INT_MAX - 1;	/* be notified after md and before hardware drivers */
3050 	WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
3051 
3052 #if 0
3053 	/* set to 1 to stress test synchronous mode */
3054 	dm_integrity_enter_synchronous_mode(ic);
3055 #endif
3056 }
3057 
3058 static void dm_integrity_status(struct dm_target *ti, status_type_t type,
3059 				unsigned status_flags, char *result, unsigned maxlen)
3060 {
3061 	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3062 	unsigned arg_count;
3063 	size_t sz = 0;
3064 
3065 	switch (type) {
3066 	case STATUSTYPE_INFO:
3067 		DMEMIT("%llu %llu",
3068 			(unsigned long long)atomic64_read(&ic->number_of_mismatches),
3069 			ic->provided_data_sectors);
3070 		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3071 			DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
3072 		else
3073 			DMEMIT(" -");
3074 		break;
3075 
3076 	case STATUSTYPE_TABLE: {
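		/*
		 * Reconstruct the journal_watermark percentage from
		 * free_sectors_threshold, rounding to the nearest percent.
		 */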
3077 		__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
3078 		watermark_percentage += ic->journal_entries / 2;
3079 		do_div(watermark_percentage, ic->journal_entries);
3080 		arg_count = 3;
3081 		arg_count += !!ic->meta_dev;
3082 		arg_count += ic->sectors_per_block != 1;
3083 		arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
3084 		arg_count += ic->discard;
3085 		arg_count += ic->mode == 'J';
3086 		arg_count += ic->mode == 'J';
3087 		arg_count += ic->mode == 'B';
3088 		arg_count += ic->mode == 'B';
3089 		arg_count += !!ic->internal_hash_alg.alg_string;
3090 		arg_count += !!ic->journal_crypt_alg.alg_string;
3091 		arg_count += !!ic->journal_mac_alg.alg_string;
3092 		arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
3093 		DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
3094 		       ic->tag_size, ic->mode, arg_count);
3095 		if (ic->meta_dev)
3096 			DMEMIT(" meta_device:%s", ic->meta_dev->name);
3097 		if (ic->sectors_per_block != 1)
3098 			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3099 		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3100 			DMEMIT(" recalculate");
3101 		if (ic->discard)
3102 			DMEMIT(" allow_discards");
3103 		DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3104 		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3105 		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
3106 		if (ic->mode == 'J') {
3107 			DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
3108 			DMEMIT(" commit_time:%u", ic->autocommit_msec);
3109 		}
3110 		if (ic->mode == 'B') {
3111 			DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
3112 			DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
3113 		}
3114 		if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
3115 			DMEMIT(" fix_padding");
3116 
3117 #define EMIT_ALG(a, n)							\
3118 		do {							\
3119 			if (ic->a.alg_string) {				\
3120 				DMEMIT(" %s:%s", n, ic->a.alg_string);	\
3121 				if (ic->a.key_string)			\
3122 					DMEMIT(":%s", ic->a.key_string);\
3123 			}						\
3124 		} while (0)
3125 		EMIT_ALG(internal_hash_alg, "internal_hash");
3126 		EMIT_ALG(journal_crypt_alg, "journal_crypt");
3127 		EMIT_ALG(journal_mac_alg, "journal_mac");
3128 		break;
3129 	}
3130 	}
3131 }
3132 
3133 static int dm_integrity_iterate_devices(struct dm_target *ti,
3134 					iterate_devices_callout_fn fn, void *data)
3135 {
3136 	struct dm_integrity_c *ic = ti->private;
3137 
3138 	if (!ic->meta_dev)
3139 		return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3140 	else
3141 		return fn(ti, ic->dev, 0, ti->len, data);
3142 }
3143 
3144 static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
3145 {
3146 	struct dm_integrity_c *ic = ti->private;
3147 
3148 	if (ic->sectors_per_block > 1) {
3149 		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3150 		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3151 		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
3152 	}
3153 }
3154 
3155 static void calculate_journal_section_size(struct dm_integrity_c *ic)
3156 {
3157 	unsigned sector_space = JOURNAL_SECTOR_DATA;
3158 
3159 	ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
3160 	ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
3161 					 JOURNAL_ENTRY_ROUNDUP);
3162 
3163 	if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3164 		sector_space -= JOURNAL_MAC_PER_SECTOR;
3165 	ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3166 	ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3167 	ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3168 	ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
3169 }
3170 
3171 static int calculate_device_limits(struct dm_integrity_c *ic)
3172 {
3173 	__u64 initial_sectors;
3174 
3175 	calculate_journal_section_size(ic);
3176 	initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3177 	if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3178 		return -EINVAL;
3179 	ic->initial_sectors = initial_sectors;
3180 
3181 	if (!ic->meta_dev) {
3182 		sector_t last_sector, last_area, last_offset;
3183 
3184 		/* we have to maintain excessive padding for compatibility with existing volumes */
3185 		__u64 metadata_run_padding =
3186 			ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
3187 			(__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
3188 			(__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
3189 
3190 		ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3191 					    metadata_run_padding) >> SECTOR_SHIFT;
3192 		if (!(ic->metadata_run & (ic->metadata_run - 1)))
3193 			ic->log2_metadata_run = __ffs(ic->metadata_run);
3194 		else
3195 			ic->log2_metadata_run = -1;
3196 
3197 		get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3198 		last_sector = get_data_sector(ic, last_area, last_offset);
3199 		if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3200 			return -EINVAL;
3201 	} else {
3202 		__u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3203 		meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3204 				>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3205 		meta_size <<= ic->log2_buffer_sectors;
3206 		if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3207 		    ic->initial_sectors + meta_size > ic->meta_device_sectors)
3208 			return -EINVAL;
3209 		ic->metadata_run = 1;
3210 		ic->log2_metadata_run = 0;
3211 	}
3212 
3213 	return 0;
3214 }
3215 
3216 static void get_provided_data_sectors(struct dm_integrity_c *ic)
3217 {
3218 	if (!ic->meta_dev) {
3219 		int test_bit;
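		/*
		 * Find the largest provided_data_sectors that still passes
		 * calculate_device_limits(): try each bit from high to low and
		 * keep it only if the layout still fits the device.
		 */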
3220 		ic->provided_data_sectors = 0;
3221 		for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3222 			__u64 prev_data_sectors = ic->provided_data_sectors;
3223 
3224 			ic->provided_data_sectors |= (sector_t)1 << test_bit;
3225 			if (calculate_device_limits(ic))
3226 				ic->provided_data_sectors = prev_data_sectors;
3227 		}
3228 	} else {
3229 		ic->provided_data_sectors = ic->data_device_sectors;
3230 		ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
3231 	}
3232 }
3233 
3234 static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
3235 {
3236 	unsigned journal_sections;
3237 	int test_bit;
3238 
3239 	memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3240 	memcpy(ic->sb->magic, SB_MAGIC, 8);
3241 	ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3242 	ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3243 	if (ic->journal_mac_alg.alg_string)
3244 		ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3245 
3246 	calculate_journal_section_size(ic);
3247 	journal_sections = journal_sectors / ic->journal_section_sectors;
3248 	if (!journal_sections)
3249 		journal_sections = 1;
3250 
3251 	if (!ic->meta_dev) {
3252 		if (ic->fix_padding)
3253 			ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
3254 		ic->sb->journal_sections = cpu_to_le32(journal_sections);
3255 		if (!interleave_sectors)
3256 			interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3257 		ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3258 		ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3259 		ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3260 
3261 		get_provided_data_sectors(ic);
3262 		if (!ic->provided_data_sectors)
3263 			return -EINVAL;
3264 	} else {
3265 		ic->sb->log2_interleave_sectors = 0;
3266 
3267 		get_provided_data_sectors(ic);
3268 		if (!ic->provided_data_sectors)
3269 			return -EINVAL;
3270 
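		/*
		 * The same high-to-low bit search as get_provided_data_sectors(),
		 * here for the number of journal sections; if no journal fits,
		 * retry with a smaller buffer granularity.
		 */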
3271 try_smaller_buffer:
3272 		ic->sb->journal_sections = cpu_to_le32(0);
3273 		for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
3274 			__u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3275 			__u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
3276 			if (test_journal_sections > journal_sections)
3277 				continue;
3278 			ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3279 			if (calculate_device_limits(ic))
3280 				ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3281 
3282 		}
3283 		if (!le32_to_cpu(ic->sb->journal_sections)) {
3284 			if (ic->log2_buffer_sectors > 3) {
3285 				ic->log2_buffer_sectors--;
3286 				goto try_smaller_buffer;
3287 			}
3288 			return -EINVAL;
3289 		}
3290 	}
3291 
3292 	ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3293 
3294 	sb_set_version(ic);
3295 
3296 	return 0;
3297 }
3298 
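/*
 * Register a block integrity profile on the mapped device, so that the
 * upper layers can attach integrity tags to bios. This is only used when
 * tags are supplied externally rather than computed by an internal hash.
 */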
static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
{
	struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
	struct blk_integrity bi;

	memset(&bi, 0, sizeof(bi));
	bi.profile = &dm_integrity_profile;
	bi.tuple_size = ic->tag_size;
	bi.tag_size = bi.tuple_size;
	bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
}

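/*
 * Helpers for dm-io page lists. The array is terminated by an entry with
 * a NULL page, which is what dm_integrity_free_page_list() uses to find
 * the end.
 */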
static void dm_integrity_free_page_list(struct page_list *pl)
{
	unsigned i;

	if (!pl)
		return;
	for (i = 0; pl[i].page; i++)
		__free_page(pl[i].page);
	kvfree(pl);
}

static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
{
	struct page_list *pl;
	unsigned i;

	pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
	if (!pl)
		return NULL;

	for (i = 0; i < n_pages; i++) {
		pl[i].page = alloc_page(GFP_KERNEL);
		if (!pl[i].page) {
			dm_integrity_free_page_list(pl);
			return NULL;
		}
		if (i)
			pl[i - 1].next = &pl[i];
	}
	pl[i].page = NULL;
	pl[i].next = NULL;

	return pl;
}

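/*
 * Helpers for the per-section journal scatterlists: each journal section
 * gets one scatterlist covering its pages in the page list, so a whole
 * section can be en/decrypted with a single skcipher request.
 */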
static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
{
	unsigned i;
	for (i = 0; i < ic->journal_sections; i++)
		kvfree(sl[i]);
	kvfree(sl);
}

static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
								   struct page_list *pl)
{
	struct scatterlist **sl;
	unsigned i;

	sl = kvmalloc_array(ic->journal_sections,
			    sizeof(struct scatterlist *),
			    GFP_KERNEL | __GFP_ZERO);
	if (!sl)
		return NULL;

	for (i = 0; i < ic->journal_sections; i++) {
		struct scatterlist *s;
		unsigned start_index, start_offset;
		unsigned end_index, end_offset;
		unsigned n_pages;
		unsigned idx;

		page_list_location(ic, i, 0, &start_index, &start_offset);
		page_list_location(ic, i, ic->journal_section_sectors - 1,
				   &end_index, &end_offset);

		n_pages = (end_index - start_index + 1);

		s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
				   GFP_KERNEL);
		if (!s) {
			dm_integrity_free_journal_scatterlist(ic, sl);
			return NULL;
		}

		sg_init_table(s, n_pages);
		for (idx = start_index; idx <= end_index; idx++) {
			char *va = lowmem_page_address(pl[idx].page);
			unsigned start = 0, end = PAGE_SIZE;
			if (idx == start_index)
				start = start_offset;
			if (idx == end_index)
				end = end_offset + (1 << SECTOR_SHIFT);
			sg_set_buf(&s[idx - start_index], va + start, end - start);
		}

		sl[i] = s;
	}

	return sl;
}

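/*
 * Free an algorithm specification. kzfree() is used so that key material
 * is wiped from memory before it is released.
 */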
static void free_alg(struct alg_spec *a)
{
	kzfree(a->alg_string);
	kzfree(a->key);
	memset(a, 0, sizeof *a);
}

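/*
 * Parse an "option:algorithm[:hexkey]" argument: the algorithm name is
 * duplicated into a->alg_string and the optional key is converted from
 * hex into a->key. An odd-length or malformed hex string is rejected.
 */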
static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
{
	char *k;

	free_alg(a);

	a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
	if (!a->alg_string)
		goto nomem;

	k = strchr(a->alg_string, ':');
	if (k) {
		*k = 0;
		a->key_string = k + 1;
		if (strlen(a->key_string) & 1)
			goto inval;

		a->key_size = strlen(a->key_string) / 2;
		a->key = kmalloc(a->key_size, GFP_KERNEL);
		if (!a->key)
			goto nomem;
		if (hex2bin(a->key, a->key_string, a->key_size))
			goto inval;
	}

	return 0;
inval:
	*error = error_inval;
	return -EINVAL;
nomem:
	*error = "Out of memory for an argument";
	return -ENOMEM;
}

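/*
 * Allocate the hash transform named in the algorithm spec and set its key.
 * A transform that requires a key (CRYPTO_TFM_NEED_KEY) but was given none
 * is rejected with -ENOKEY.
 */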
static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
		   char *error_alg, char *error_key)
{
	int r;

	if (a->alg_string) {
		*hash = crypto_alloc_shash(a->alg_string, 0, 0);
		if (IS_ERR(*hash)) {
			*error = error_alg;
			r = PTR_ERR(*hash);
			*hash = NULL;
			return r;
		}

		if (a->key) {
			r = crypto_shash_setkey(*hash, a->key, a->key_size);
			if (r) {
				*error = error_key;
				return r;
			}
		} else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
			*error = error_key;
			return -ENOKEY;
		}
	}

	return 0;
}

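/*
 * Allocate the in-memory journal and, when journal_crypt is configured,
 * set up its encryption: a cipher with block size 1 (a stream cipher) is
 * run once to produce a keystream that journal sectors are XORed with,
 * while a block cipher gets one preallocated skcipher request per section
 * with an IV derived from the section number. Finally the commit IDs are
 * made distinct and the journal node tree is allocated.
 */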
static int create_journal(struct dm_integrity_c *ic, char **error)
{
	int r = 0;
	unsigned i;
	__u64 journal_pages, journal_desc_size, journal_tree_size;
	unsigned char *crypt_data = NULL, *crypt_iv = NULL;
	struct skcipher_request *req = NULL;

	ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
	ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
	ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
	ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);

	journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
				PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
	journal_desc_size = journal_pages * sizeof(struct page_list);
	if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_pages = journal_pages;

	ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
	if (!ic->journal) {
		*error = "Could not allocate memory for journal";
		r = -ENOMEM;
		goto bad;
	}
	if (ic->journal_crypt_alg.alg_string) {
		unsigned ivsize, blocksize;
		struct journal_completion comp;

		comp.ic = ic;
		ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
		if (IS_ERR(ic->journal_crypt)) {
			*error = "Invalid journal cipher";
			r = PTR_ERR(ic->journal_crypt);
			ic->journal_crypt = NULL;
			goto bad;
		}
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		blocksize = crypto_skcipher_blocksize(ic->journal_crypt);

		if (ic->journal_crypt_alg.key) {
			r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
						   ic->journal_crypt_alg.key_size);
			if (r) {
				*error = "Error setting encryption key";
				goto bad;
			}
		}
		DEBUG_print("cipher %s, block size %u iv size %u\n",
			    ic->journal_crypt_alg.alg_string, blocksize, ivsize);

		ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
		if (!ic->journal_io) {
			*error = "Could not allocate memory for journal io";
			r = -ENOMEM;
			goto bad;
		}

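		/*
		 * Stream cipher: encrypt a buffer of zeroed pages once to
		 * obtain the keystream, keep it in journal_xor and free the
		 * transform; journal sectors are later just XORed with it.
		 */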
		if (blocksize == 1) {
			struct scatterlist *sg;

			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
			if (!req) {
				*error = "Could not allocate crypt request";
				r = -ENOMEM;
				goto bad;
			}

			crypt_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!crypt_iv) {
				*error = "Could not allocate iv";
				r = -ENOMEM;
				goto bad;
			}

			ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
			if (!ic->journal_xor) {
				*error = "Could not allocate memory for journal xor";
				r = -ENOMEM;
				goto bad;
			}

			sg = kvmalloc_array(ic->journal_pages + 1,
					    sizeof(struct scatterlist),
					    GFP_KERNEL);
			if (!sg) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			sg_init_table(sg, ic->journal_pages + 1);
			for (i = 0; i < ic->journal_pages; i++) {
				char *va = lowmem_page_address(ic->journal_xor[i].page);
				clear_page(va);
				sg_set_buf(&sg[i], va, PAGE_SIZE);
			}
			sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);

			skcipher_request_set_crypt(req, sg, sg,
						   PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
			init_completion(&comp.comp);
			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
			if (do_crypt(true, req, &comp))
				wait_for_completion(&comp.comp);
			kvfree(sg);
			r = dm_integrity_failed(ic);
			if (r) {
				*error = "Unable to encrypt journal";
				goto bad;
			}
			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");

			crypto_free_skcipher(ic->journal_crypt);
			ic->journal_crypt = NULL;
		} else {
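			/*
			 * Block cipher: preallocate one skcipher request per
			 * journal section, with an IV derived by encrypting
			 * the little-endian section number.
			 */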
			unsigned crypt_len = roundup(ivsize, blocksize);

			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
			if (!req) {
				*error = "Could not allocate crypt request";
				r = -ENOMEM;
				goto bad;
			}

			crypt_iv = kmalloc(ivsize, GFP_KERNEL);
			if (!crypt_iv) {
				*error = "Could not allocate iv";
				r = -ENOMEM;
				goto bad;
			}

			crypt_data = kmalloc(crypt_len, GFP_KERNEL);
			if (!crypt_data) {
				*error = "Unable to allocate crypt data";
				r = -ENOMEM;
				goto bad;
			}

			ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
			if (!ic->journal_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
			if (!ic->journal_io_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->sk_requests = kvmalloc_array(ic->journal_sections,
							 sizeof(struct skcipher_request *),
							 GFP_KERNEL | __GFP_ZERO);
			if (!ic->sk_requests) {
				*error = "Unable to allocate sk requests";
				r = -ENOMEM;
				goto bad;
			}
			for (i = 0; i < ic->journal_sections; i++) {
				struct scatterlist sg;
				struct skcipher_request *section_req;
				__le32 section_le = cpu_to_le32(i);

				memset(crypt_iv, 0x00, ivsize);
				memset(crypt_data, 0x00, crypt_len);
				memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));

				sg_init_one(&sg, crypt_data, crypt_len);
				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
				init_completion(&comp.comp);
				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
				if (do_crypt(true, req, &comp))
					wait_for_completion(&comp.comp);

				r = dm_integrity_failed(ic);
				if (r) {
					*error = "Unable to generate iv";
					goto bad;
				}

				section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
				if (!section_req) {
					*error = "Unable to allocate crypt request";
					r = -ENOMEM;
					goto bad;
				}
				section_req->iv = kmalloc_array(ivsize, 2,
								GFP_KERNEL);
				if (!section_req->iv) {
					skcipher_request_free(section_req);
					*error = "Unable to allocate iv";
					r = -ENOMEM;
					goto bad;
				}
				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
				ic->sk_requests[i] = section_req;
				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
			}
		}
	}

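	/*
	 * The commit IDs must be mutually distinct: the journal code cycles
	 * through them to detect incompletely written sections. Journal
	 * encryption above may have scrambled them into colliding values,
	 * so bump any duplicates until all four differ.
	 */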
	for (i = 0; i < N_COMMIT_IDS; i++) {
		unsigned j;
retest_commit_id:
		for (j = 0; j < i; j++) {
			if (ic->commit_ids[j] == ic->commit_ids[i]) {
				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
				goto retest_commit_id;
			}
		}
		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
	}

	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
	if (journal_tree_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
	if (!ic->journal_tree) {
		*error = "Could not allocate memory for journal tree";
		r = -ENOMEM;
	}
bad:
	kfree(crypt_data);
	kfree(crypt_iv);
	skcipher_request_free(req);

	return r;
}

/*
 * Construct an integrity mapping
 *
 * Arguments:
 *	device
 *	offset from the start of the device
 *	tag size
 *	D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
 *	number of optional arguments
 *	optional arguments:
 *		journal_sectors
 *		interleave_sectors
 *		buffer_sectors
 *		journal_watermark
 *		commit_time
 *		meta_device
 *		block_size
 *		sectors_per_bit
 *		bitmap_flush_interval
 *		internal_hash
 *		journal_crypt
 *		journal_mac
 *		recalculate
 *		allow_discards
 *		fix_padding
 */
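/*
 * Illustrative usage sketch (the authoritative procedure is in
 * Documentation/admin-guide/device-mapper/dm-integrity.rst): the number of
 * usable sectors is only known once the superblock has been formatted, so
 * a new device is typically activated in two steps, e.g.:
 *
 *	# zero the superblock area (SB_SECTORS = 8, i.e. the first 4KiB)
 *	dd if=/dev/zero of=/dev/sdX bs=512 count=8
 *	# format by mapping a single sector
 *	dmsetup create ix --table "0 1 integrity /dev/sdX 0 4 J 1 internal_hash:crc32c"
 *	# read provided_data_sectors from "dmsetup status ix", then
 *	dmsetup reload ix --table "0 <provided_data_sectors> integrity /dev/sdX 0 4 J 1 internal_hash:crc32c"
 *	dmsetup resume ix
 */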
static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_integrity_c *ic;
	char dummy;
	int r;
	unsigned extra_args;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 9, "Invalid number of feature args"},
	};
	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
	bool should_write_sb;
	__u64 threshold;
	unsigned long long start;
	__s8 log2_sectors_per_bitmap_bit = -1;
	__s8 log2_blocks_per_bitmap_bit;
	__u64 bits_in_journal;
	__u64 n_bitmap_bits;

#define DIRECT_ARGUMENTS	4

	if (argc <= DIRECT_ARGUMENTS) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
	if (!ic) {
		ti->error = "Cannot allocate integrity context";
		return -ENOMEM;
	}
	ti->private = ic;
	ti->per_io_data_size = sizeof(struct dm_integrity_io);
	ic->ti = ti;

	ic->in_progress = RB_ROOT;
	INIT_LIST_HEAD(&ic->wait_list);
	init_waitqueue_head(&ic->endio_wait);
	bio_list_init(&ic->flush_bio_list);
	init_waitqueue_head(&ic->copy_to_journal_wait);
	init_completion(&ic->crypto_backoff);
	atomic64_set(&ic->number_of_mismatches, 0);
	ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
		ti->error = "Invalid starting offset";
		r = -EINVAL;
		goto bad;
	}
	ic->start = start;

	if (strcmp(argv[2], "-")) {
		if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
			ti->error = "Invalid tag size";
			r = -EINVAL;
			goto bad;
		}
	}

	if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
	    !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
		ic->mode = argv[3][0];
	} else {
		ti->error = "Invalid mode (expecting J, B, D, R)";
		r = -EINVAL;
		goto bad;
	}

	journal_sectors = 0;
	interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
	buffer_sectors = DEFAULT_BUFFER_SECTORS;
	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
	sync_msec = DEFAULT_SYNC_MSEC;
	ic->sectors_per_block = 1;

	as.argc = argc - DIRECT_ARGUMENTS;
	as.argv = argv + DIRECT_ARGUMENTS;
	r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
	if (r)
		goto bad;

	while (extra_args--) {
		const char *opt_string;
		unsigned val;
		unsigned long long llval;
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			r = -EINVAL;
			ti->error = "Not enough feature arguments";
			goto bad;
		}
		if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
			journal_sectors = val ? val : 1;
		else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
			interleave_sectors = val;
		else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
			buffer_sectors = val;
		else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
			journal_watermark = val;
		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
			sync_msec = val;
		else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
			if (ic->meta_dev) {
				dm_put_device(ti, ic->meta_dev);
				ic->meta_dev = NULL;
			}
			r = dm_get_device(ti, strchr(opt_string, ':') + 1,
					  dm_table_get_mode(ti->table), &ic->meta_dev);
			if (r) {
				ti->error = "Device lookup failed";
				goto bad;
			}
		} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
			if (val < 1 << SECTOR_SHIFT ||
			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
			    (val & (val - 1))) {
				r = -EINVAL;
				ti->error = "Invalid block_size argument";
				goto bad;
			}
			ic->sectors_per_block = val >> SECTOR_SHIFT;
		} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
			log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
		} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
			if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
				r = -EINVAL;
				ti->error = "Invalid bitmap_flush_interval argument";
				goto bad;
			}
			ic->bitmap_flush_interval = msecs_to_jiffies(val);
		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
					    "Invalid internal_hash argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
					    "Invalid journal_crypt argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
					    "Invalid journal_mac argument");
			if (r)
				goto bad;
		} else if (!strcmp(opt_string, "recalculate")) {
			ic->recalculate_flag = true;
		} else if (!strcmp(opt_string, "allow_discards")) {
			ic->discard = true;
		} else if (!strcmp(opt_string, "fix_padding")) {
			ic->fix_padding = true;
		} else {
			r = -EINVAL;
			ti->error = "Invalid argument";
			goto bad;
		}
	}

	ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
	if (!ic->meta_dev)
		ic->meta_device_sectors = ic->data_device_sectors;
	else
		ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;

	if (!journal_sectors) {
		journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
				      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
	}

	if (!buffer_sectors)
		buffer_sectors = 1;
	ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);

	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
		    "Invalid internal hash", "Error setting internal hash key");
	if (r)
		goto bad;

	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
		    "Invalid journal mac", "Error setting journal mac key");
	if (r)
		goto bad;

	if (!ic->tag_size) {
		if (!ic->internal_hash) {
			ti->error = "Unknown tag size";
			r = -EINVAL;
			goto bad;
		}
		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
	}
	if (ic->tag_size > MAX_TAG_SIZE) {
		ti->error = "Too big tag size";
		r = -EINVAL;
		goto bad;
	}
	if (!(ic->tag_size & (ic->tag_size - 1)))
		ic->log2_tag_size = __ffs(ic->tag_size);
	else
		ic->log2_tag_size = -1;

	if (ic->mode == 'B' && !ic->internal_hash) {
		r = -EINVAL;
		ti->error = "Bitmap mode can be only used with internal hash";
		goto bad;
	}

	if (ic->discard && !ic->internal_hash) {
		r = -EINVAL;
		ti->error = "Discard can be only used with internal hash";
		goto bad;
	}

	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
	ic->autocommit_msec = sync_msec;
	timer_setup(&ic->autocommit_timer, autocommit_fn, 0);

	ic->io = dm_io_client_create();
	if (IS_ERR(ic->io)) {
		r = PTR_ERR(ic->io);
		ic->io = NULL;
		ti->error = "Cannot allocate dm io";
		goto bad;
	}

	r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
	if (r) {
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->metadata_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	/*
	 * If this workqueue were percpu, it would cause bio reordering
	 * and reduced performance.
	 */
	ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!ic->wait_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
					  METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->offload_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
	if (!ic->commit_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}
	INIT_WORK(&ic->commit_work, integrity_commit);

	if (ic->mode == 'J' || ic->mode == 'B') {
		ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
		if (!ic->writer_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->writer_work, integrity_writer);
	}

	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
	if (!ic->sb) {
		r = -ENOMEM;
		ti->error = "Cannot allocate superblock area";
		goto bad;
	}

	r = sync_rw_sb(ic, REQ_OP_READ, 0);
	if (r) {
		ti->error = "Error reading superblock";
		goto bad;
	}
	should_write_sb = false;
	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
		if (ic->mode != 'R') {
			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
				r = -EINVAL;
				ti->error = "The device is not initialized";
				goto bad;
			}
		}

		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
		if (r) {
			ti->error = "Could not initialize superblock";
			goto bad;
		}
		if (ic->mode != 'R')
			should_write_sb = true;
	}

	if (!ic->sb->version || ic->sb->version > SB_VERSION_4) {
		r = -EINVAL;
		ti->error = "Unknown version";
		goto bad;
	}
	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
		r = -EINVAL;
		ti->error = "Tag size doesn't match the information in superblock";
		goto bad;
	}
	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
		r = -EINVAL;
		ti->error = "Block size doesn't match the information in superblock";
		goto bad;
	}
	if (!le32_to_cpu(ic->sb->journal_sections)) {
		r = -EINVAL;
		ti->error = "Corrupted superblock, journal_sections is 0";
		goto bad;
	}
	/* make sure that ti->max_io_len doesn't overflow */
	if (!ic->meta_dev) {
		if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
		    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	} else {
		if (ic->sb->log2_interleave_sectors) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	}
	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
		r = -EINVAL;
		ti->error = "Journal mac mismatch";
		goto bad;
	}

	get_provided_data_sectors(ic);
	if (!ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "The device is too small";
		goto bad;
	}

try_smaller_buffer:
	r = calculate_device_limits(ic);
	if (r) {
		if (ic->meta_dev) {
			if (ic->log2_buffer_sectors > 3) {
				ic->log2_buffer_sectors--;
				goto try_smaller_buffer;
			}
		}
		ti->error = "The device is too small";
		goto bad;
	}

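	/*
	 * Pick the bitmap granularity: start from the default (or the
	 * user-supplied) sectors-per-bit value, then grow it until the
	 * whole bitmap fits into the journal area.
	 */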
	if (log2_sectors_per_bitmap_bit < 0)
		log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
	if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
		log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;

	bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
	if (bits_in_journal > UINT_MAX)
		bits_in_journal = UINT_MAX;
	while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
		log2_sectors_per_bitmap_bit++;

	log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
	ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
	if (should_write_sb) {
		ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
	}
	n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
				+ (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
	ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);

	if (!ic->meta_dev)
		ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));

	if (ti->len > ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "Not enough provided sectors for requested mapping size";
		goto bad;
	}

	threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
	threshold += 50;
	do_div(threshold, 100);
	ic->free_sectors_threshold = threshold;

	DEBUG_print("initialized:\n");
	DEBUG_print("	integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
	DEBUG_print("	journal_entry_size %u\n", ic->journal_entry_size);
	DEBUG_print("	journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
	DEBUG_print("	journal_section_entries %u\n", ic->journal_section_entries);
	DEBUG_print("	journal_section_sectors %u\n", ic->journal_section_sectors);
	DEBUG_print("	journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
	DEBUG_print("	journal_entries %u\n", ic->journal_entries);
	DEBUG_print("	log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
	DEBUG_print("	data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
	DEBUG_print("	initial_sectors 0x%x\n", ic->initial_sectors);
	DEBUG_print("	metadata_run 0x%x\n", ic->metadata_run);
	DEBUG_print("	log2_metadata_run %d\n", ic->log2_metadata_run);
	DEBUG_print("	provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
	DEBUG_print("	log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
	DEBUG_print("	bits_in_journal %llu\n", bits_in_journal);

	if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
		ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
		ic->sb->recalc_sector = cpu_to_le64(0);
	}

	if (ic->internal_hash) {
		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
		if (!ic->recalc_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->recalc_work, integrity_recalc);
		ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
		if (!ic->recalc_buffer) {
			ti->error = "Cannot allocate buffer for recalculating";
			r = -ENOMEM;
			goto bad;
		}
		ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
						 ic->tag_size, GFP_KERNEL);
		if (!ic->recalc_tags) {
			ti->error = "Cannot allocate tags for recalculating";
			r = -ENOMEM;
			goto bad;
		}
	}

	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
			1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
	if (IS_ERR(ic->bufio)) {
		r = PTR_ERR(ic->bufio);
		ti->error = "Cannot initialize dm-bufio";
		ic->bufio = NULL;
		goto bad;
	}
	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);

	if (ic->mode != 'R') {
		r = create_journal(ic, &ti->error);
		if (r)
			goto bad;
	}

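	/*
	 * In bitmap mode the journal area doubles as the on-disk bitmap
	 * image: each bitmap_block_status slice points straight into the
	 * journal page list.
	 */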
	if (ic->mode == 'B') {
		unsigned i;
		unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);

		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->recalc_bitmap) {
			r = -ENOMEM;
			goto bad;
		}
		ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->may_write_bitmap) {
			r = -ENOMEM;
			goto bad;
		}
		ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
		if (!ic->bbs) {
			r = -ENOMEM;
			goto bad;
		}
		INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
		for (i = 0; i < ic->n_bitmap_blocks; i++) {
			struct bitmap_block_status *bbs = &ic->bbs[i];
			unsigned sector, pl_index, pl_offset;

			INIT_WORK(&bbs->work, bitmap_block_work);
			bbs->ic = ic;
			bbs->idx = i;
			bio_list_init(&bbs->bio_queue);
			spin_lock_init(&bbs->bio_queue_lock);

			sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
			pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
			pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

			bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
		}
	}

	if (should_write_sb) {
		init_journal(ic, 0, ic->journal_sections, 0);
		r = dm_integrity_failed(ic);
		if (unlikely(r)) {
			ti->error = "Error initializing journal";
			goto bad;
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (r) {
			ti->error = "Error initializing superblock";
			goto bad;
		}
		ic->just_formatted = true;
	}

	if (!ic->meta_dev) {
		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
		if (r)
			goto bad;
	}
	if (ic->mode == 'B') {
		unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
		if (!max_io_len)
			max_io_len = 1U << 31;
		DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
		if (!ti->max_io_len || ti->max_io_len > max_io_len) {
			r = dm_set_target_max_io_len(ti, max_io_len);
			if (r)
				goto bad;
		}
	}

	if (!ic->internal_hash)
		dm_integrity_set(ti, ic);

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	if (ic->discard)
		ti->num_discard_bios = 1;

	return 0;

bad:
	dm_integrity_dtr(ti);
	return r;
}

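/*
 * Destructor: releases everything the constructor allocated. It is also
 * called on the error path of dm_integrity_ctr(), so every free here must
 * be safe on a partially constructed context.
 */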
static void dm_integrity_dtr(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
	BUG_ON(!list_empty(&ic->wait_list));

	if (ic->metadata_wq)
		destroy_workqueue(ic->metadata_wq);
	if (ic->wait_wq)
		destroy_workqueue(ic->wait_wq);
	if (ic->offload_wq)
		destroy_workqueue(ic->offload_wq);
	if (ic->commit_wq)
		destroy_workqueue(ic->commit_wq);
	if (ic->writer_wq)
		destroy_workqueue(ic->writer_wq);
	if (ic->recalc_wq)
		destroy_workqueue(ic->recalc_wq);
	vfree(ic->recalc_buffer);
	kvfree(ic->recalc_tags);
	kvfree(ic->bbs);
	if (ic->bufio)
		dm_bufio_client_destroy(ic->bufio);
	mempool_exit(&ic->journal_io_mempool);
	if (ic->io)
		dm_io_client_destroy(ic->io);
	if (ic->dev)
		dm_put_device(ti, ic->dev);
	if (ic->meta_dev)
		dm_put_device(ti, ic->meta_dev);
	dm_integrity_free_page_list(ic->journal);
	dm_integrity_free_page_list(ic->journal_io);
	dm_integrity_free_page_list(ic->journal_xor);
	dm_integrity_free_page_list(ic->recalc_bitmap);
	dm_integrity_free_page_list(ic->may_write_bitmap);
	if (ic->journal_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
	if (ic->journal_io_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
	if (ic->sk_requests) {
		unsigned i;

		for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req = ic->sk_requests[i];
			if (req) {
				kzfree(req->iv);
				skcipher_request_free(req);
			}
		}
		kvfree(ic->sk_requests);
	}
	kvfree(ic->journal_tree);
	if (ic->sb)
		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);

	if (ic->internal_hash)
		crypto_free_shash(ic->internal_hash);
	free_alg(&ic->internal_hash_alg);

	if (ic->journal_crypt)
		crypto_free_skcipher(ic->journal_crypt);
	free_alg(&ic->journal_crypt_alg);

	if (ic->journal_mac)
		crypto_free_shash(ic->journal_mac);
	free_alg(&ic->journal_mac_alg);

	kfree(ic);
}

static struct target_type integrity_target = {
	.name			= "integrity",
	.version		= {1, 6, 0},
	.module			= THIS_MODULE,
	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
	.ctr			= dm_integrity_ctr,
	.dtr			= dm_integrity_dtr,
	.map			= dm_integrity_map,
	.postsuspend		= dm_integrity_postsuspend,
	.resume			= dm_integrity_resume,
	.status			= dm_integrity_status,
	.iterate_devices	= dm_integrity_iterate_devices,
	.io_hints		= dm_integrity_io_hints,
};

static int __init dm_integrity_init(void)
{
	int r;

	journal_io_cache = kmem_cache_create("integrity_journal_io",
					     sizeof(struct journal_io), 0, 0, NULL);
	if (!journal_io_cache) {
		DMERR("can't allocate journal io cache");
		return -ENOMEM;
	}

	r = dm_register_target(&integrity_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_integrity_exit(void)
{
	dm_unregister_target(&integrity_target);
	kmem_cache_destroy(journal_io_cache);
}

module_init(dm_integrity_init);
module_exit(dm_integrity_exit);

MODULE_AUTHOR("Milan Broz");
MODULE_AUTHOR("Mikulas Patocka");
MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
MODULE_LICENSE("GPL");