xref: /linux/drivers/md/dm-verity-target.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
 *
 * This file is released under the GPLv2.
 *
 * In the file "/sys/module/dm_verity/parameters/prefetch_cluster" you can set
 * the default prefetch value. Data is read from the hash device in
 * "prefetch_cluster" chunks. Setting this greatly improves performance when
 * data and hash are on the same disk, on different partitions, on devices
 * with poor random access behavior.
 */
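
/*
 * For example, the default prefetch size can be raised at runtime
 * (illustrative value):
 *
 *	echo 1048576 > /sys/module/dm_verity/parameters/prefetch_cluster
 */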

#include "dm-verity.h"
#include "dm-verity-fec.h"

#include <linux/module.h>
#include <linux/reboot.h>

#define DM_MSG_PREFIX			"verity"

#define DM_VERITY_ENV_LENGTH		42
#define DM_VERITY_ENV_VAR_NAME		"DM_VERITY_ERR_BLOCK_NR"

#define DM_VERITY_DEFAULT_PREFETCH_SIZE	262144

#define DM_VERITY_MAX_CORRUPTED_ERRS	100

#define DM_VERITY_OPT_LOGGING		"ignore_corruption"
#define DM_VERITY_OPT_RESTART		"restart_on_corruption"
#define DM_VERITY_OPT_IGN_ZEROES	"ignore_zero_blocks"

#define DM_VERITY_OPTS_MAX		(2 + DM_VERITY_OPTS_FEC)

static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;

module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);

struct dm_verity_prefetch_work {
	struct work_struct work;
	struct dm_verity *v;
	sector_t block;
	unsigned n_blocks;
};

/*
 * Auxiliary structure appended to each dm-bufio buffer. If the value
 * hash_verified is nonzero, the hash of the block has been verified.
 *
 * The variable hash_verified is set to 0 when allocating the buffer, then
 * it can be changed to 1 and it is never reset to 0 again.
 *
 * There is no lock around this value; at worst, a race causes multiple
 * processes to verify the hash of the same buffer simultaneously and all of
 * them to write 1 to hash_verified.
 * This condition is harmless, so we don't need locking.
 */
struct buffer_aux {
	int hash_verified;
};

/*
 * Initialize struct buffer_aux for a freshly created buffer.
 */
static void dm_bufio_alloc_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);

	aux->hash_verified = 0;
}

/*
 * Translate input sector number to the sector number on the target device.
 */
static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector)
{
	return v->data_start + dm_target_offset(v->ti, bi_sector);
}

/*
 * Return the hash position of a specified block at a specified tree level
 * (0 is the lowest level).
 * The lowest "hash_per_block_bits" bits of the result denote the hash
 * position inside a hash block. The remaining bits denote the location of
 * the hash block.
 */
static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
					 int level)
{
	return block >> (level * v->hash_per_block_bits);
}

/*
 * Wrapper for crypto_shash_init, which handles verity salting.
 */
static int verity_hash_init(struct dm_verity *v, struct shash_desc *desc)
{
	int r;

	desc->tfm = v->tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);

	if (unlikely(r < 0)) {
		DMERR("crypto_shash_init failed: %d", r);
		return r;
	}

	if (likely(v->version >= 1)) {
		r = crypto_shash_update(desc, v->salt, v->salt_size);

		if (unlikely(r < 0)) {
			DMERR("crypto_shash_update failed: %d", r);
			return r;
		}
	}

	return 0;
}

static int verity_hash_update(struct dm_verity *v, struct shash_desc *desc,
			      const u8 *data, size_t len)
{
	int r = crypto_shash_update(desc, data, len);

	if (unlikely(r < 0))
		DMERR("crypto_shash_update failed: %d", r);

	return r;
}

static int verity_hash_final(struct dm_verity *v, struct shash_desc *desc,
			     u8 *digest)
{
	int r;

	if (unlikely(!v->version)) {
		r = crypto_shash_update(desc, v->salt, v->salt_size);

		if (r < 0) {
			DMERR("crypto_shash_update failed: %d", r);
			return r;
		}
	}

	r = crypto_shash_final(desc, digest);

	if (unlikely(r < 0))
		DMERR("crypto_shash_final failed: %d", r);

	return r;
}

int verity_hash(struct dm_verity *v, struct shash_desc *desc,
		const u8 *data, size_t len, u8 *digest)
{
	int r;

	r = verity_hash_init(v, desc);
	if (unlikely(r < 0))
		return r;

	r = verity_hash_update(v, desc, data, len);
	if (unlikely(r < 0))
		return r;

	return verity_hash_final(v, desc, digest);
}

static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
				 sector_t *hash_block, unsigned *offset)
{
	sector_t position = verity_position_at_level(v, block, level);
	unsigned idx;

	*hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);

	if (!offset)
		return;

	idx = position & ((1 << v->hash_per_block_bits) - 1);
	if (!v->version)
		*offset = idx * v->digest_size;
	else
		*offset = idx << (v->hash_dev_block_bits - v->hash_per_block_bits);
}
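
/*
 * Worked example (illustrative, assuming 4096-byte hash blocks and a 32-byte
 * SHA-256 digest): hash_per_block_bits = __fls(4096 / 32) = 7, so each hash
 * block holds 128 hash entries.  For data block 1000 at level 0, position is
 * 1000 >> 0 = 1000, the hash block is v->hash_level_block[0] + (1000 >> 7) =
 * v->hash_level_block[0] + 7, and for version 1 the byte offset inside that
 * block is (1000 & 127) << (12 - 7) = 104 * 32 = 3328.
 */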

/*
 * Handle verification errors.
 */
static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
			     unsigned long long block)
{
	char verity_env[DM_VERITY_ENV_LENGTH];
	char *envp[] = { verity_env, NULL };
	const char *type_str = "";
	struct mapped_device *md = dm_table_get_md(v->ti->table);

	/* Corruption should be visible in device status in all modes */
	v->hash_failed = 1;

	if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
		goto out;

	v->corrupted_errs++;

	switch (type) {
	case DM_VERITY_BLOCK_TYPE_DATA:
		type_str = "data";
		break;
	case DM_VERITY_BLOCK_TYPE_METADATA:
		type_str = "metadata";
		break;
	default:
		BUG();
	}

	DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
		block);

	if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
		DMERR("%s: reached maximum errors", v->data_dev->name);

	snprintf(verity_env, DM_VERITY_ENV_LENGTH, "%s=%d,%llu",
		DM_VERITY_ENV_VAR_NAME, type, block);

	kobject_uevent_env(&disk_to_dev(dm_disk(md))->kobj, KOBJ_CHANGE, envp);

out:
	if (v->mode == DM_VERITY_MODE_LOGGING)
		return 0;

	if (v->mode == DM_VERITY_MODE_RESTART)
		kernel_restart("dm-verity device corrupted");

	return 1;
}

/*
 * Verify hash of a metadata block pertaining to the specified data block
 * ("block" argument) at a specified level ("level" argument).
 *
 * On successful return, verity_io_want_digest(v, io) contains the hash value
 * for a lower tree level or for the data block (if we're at the lowest level).
 *
 * If "skip_unverified" is true, an unverified buffer is skipped and 1 is
 * returned. If "skip_unverified" is false, an unverified buffer is hashed and
 * verified against the current value of verity_io_want_digest(v, io).
 */
static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
			       sector_t block, int level, bool skip_unverified,
			       u8 *want_digest)
{
	struct dm_buffer *buf;
	struct buffer_aux *aux;
	u8 *data;
	int r;
	sector_t hash_block;
	unsigned offset;

	verity_hash_at_level(v, block, level, &hash_block, &offset);

	data = dm_bufio_read(v->bufio, hash_block, &buf);
	if (IS_ERR(data))
		return PTR_ERR(data);

	aux = dm_bufio_get_aux_data(buf);

	if (!aux->hash_verified) {
		if (skip_unverified) {
			r = 1;
			goto release_ret_r;
		}

		r = verity_hash(v, verity_io_hash_desc(v, io),
				data, 1 << v->hash_dev_block_bits,
				verity_io_real_digest(v, io));
		if (unlikely(r < 0))
			goto release_ret_r;

		if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
				  v->digest_size) == 0))
			aux->hash_verified = 1;
		else if (verity_fec_decode(v, io,
					   DM_VERITY_BLOCK_TYPE_METADATA,
					   hash_block, data, NULL) == 0)
			aux->hash_verified = 1;
		else if (verity_handle_err(v,
					   DM_VERITY_BLOCK_TYPE_METADATA,
					   hash_block)) {
			r = -EIO;
			goto release_ret_r;
		}
	}

	data += offset;
	memcpy(want_digest, data, v->digest_size);
	r = 0;

release_ret_r:
	dm_bufio_release(buf);
	return r;
}

/*
 * Find a hash for a given block, write it to digest and verify the integrity
 * of the hash tree if necessary.
 */
int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
			  sector_t block, u8 *digest, bool *is_zero)
{
	int r = 0, i;

	if (likely(v->levels)) {
		/*
		 * First, we try to get the requested hash for
		 * the current block. If the hash block itself is
		 * verified, zero is returned. If it isn't, this
		 * function returns 1 and we fall back to whole
		 * chain verification.
		 */
		r = verity_verify_level(v, io, block, 0, true, digest);
		if (likely(r <= 0))
			goto out;
	}

	memcpy(digest, v->root_digest, v->digest_size);

	for (i = v->levels - 1; i >= 0; i--) {
		r = verity_verify_level(v, io, block, i, false, digest);
		if (unlikely(r))
			goto out;
	}
out:
	if (!r && v->zero_digest)
		*is_zero = !memcmp(v->zero_digest, digest, v->digest_size);
	else
		*is_zero = false;

	return r;
}

/*
 * Call the "process" function on 1 << v->data_dev_block_bits bytes of the
 * bio_vec, starting from iter.
 */
int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
			struct bvec_iter *iter,
			int (*process)(struct dm_verity *v,
				       struct dm_verity_io *io, u8 *data,
				       size_t len))
{
	unsigned todo = 1 << v->data_dev_block_bits;
	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);

	do {
		int r;
		u8 *page;
		unsigned len;
		struct bio_vec bv = bio_iter_iovec(bio, *iter);

		page = kmap_atomic(bv.bv_page);
		len = bv.bv_len;

		if (likely(len >= todo))
			len = todo;

		r = process(v, io, page + bv.bv_offset, len);
		kunmap_atomic(page);

		if (r < 0)
			return r;

		bio_advance_iter(bio, iter, len);
		todo -= len;
	} while (todo);

	return 0;
}

static int verity_bv_hash_update(struct dm_verity *v, struct dm_verity_io *io,
				 u8 *data, size_t len)
{
	return verity_hash_update(v, verity_io_hash_desc(v, io), data, len);
}

static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
			  u8 *data, size_t len)
{
	memset(data, 0, len);
	return 0;
}

/*
 * Verify one "dm_verity_io" structure.
 */
static int verity_verify_io(struct dm_verity_io *io)
{
	bool is_zero;
	struct dm_verity *v = io->v;
	struct bvec_iter start;
	unsigned b;

	for (b = 0; b < io->n_blocks; b++) {
		int r;
		struct shash_desc *desc = verity_io_hash_desc(v, io);

		r = verity_hash_for_block(v, io, io->block + b,
					  verity_io_want_digest(v, io),
					  &is_zero);
		if (unlikely(r < 0))
			return r;

		if (is_zero) {
			/*
			 * If we expect a zero block, don't validate, just
			 * return zeros.
			 */
			r = verity_for_bv_block(v, io, &io->iter,
						verity_bv_zero);
			if (unlikely(r < 0))
				return r;

			continue;
		}

		r = verity_hash_init(v, desc);
		if (unlikely(r < 0))
			return r;

		start = io->iter;
		r = verity_for_bv_block(v, io, &io->iter, verity_bv_hash_update);
		if (unlikely(r < 0))
			return r;

		r = verity_hash_final(v, desc, verity_io_real_digest(v, io));
		if (unlikely(r < 0))
			return r;

		if (likely(memcmp(verity_io_real_digest(v, io),
				  verity_io_want_digest(v, io), v->digest_size) == 0))
			continue;
		else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
					   io->block + b, NULL, &start) == 0)
			continue;
		else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
					   io->block + b))
			return -EIO;
	}

	return 0;
}

/*
 * End one "io" structure with a given error.
 */
static void verity_finish_io(struct dm_verity_io *io, int error)
{
	struct dm_verity *v = io->v;
	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);

	bio->bi_end_io = io->orig_bi_end_io;
	bio->bi_error = error;

	verity_fec_finish_io(io);

	bio_endio(bio);
}

static void verity_work(struct work_struct *w)
{
	struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);

	verity_finish_io(io, verity_verify_io(io));
}

static void verity_end_io(struct bio *bio)
{
	struct dm_verity_io *io = bio->bi_private;

	if (bio->bi_error && !verity_fec_is_enabled(io->v)) {
		verity_finish_io(io, bio->bi_error);
		return;
	}

	INIT_WORK(&io->work, verity_work);
	queue_work(io->v->verify_wq, &io->work);
}

/*
 * Prefetch buffers for the specified io.
 * The root buffer is not prefetched; it is assumed that it will be cached
 * all the time.
 */
static void verity_prefetch_io(struct work_struct *work)
{
	struct dm_verity_prefetch_work *pw =
		container_of(work, struct dm_verity_prefetch_work, work);
	struct dm_verity *v = pw->v;
	int i;

	for (i = v->levels - 2; i >= 0; i--) {
		sector_t hash_block_start;
		sector_t hash_block_end;
		verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
		verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
		if (!i) {
			unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);

			cluster >>= v->data_dev_block_bits;
			if (unlikely(!cluster))
				goto no_prefetch_cluster;

			if (unlikely(cluster & (cluster - 1)))
				cluster = 1 << __fls(cluster);

			hash_block_start &= ~(sector_t)(cluster - 1);
			hash_block_end |= cluster - 1;
			if (unlikely(hash_block_end >= v->hash_blocks))
				hash_block_end = v->hash_blocks - 1;
		}
no_prefetch_cluster:
		dm_bufio_prefetch(v->bufio, hash_block_start,
				  hash_block_end - hash_block_start + 1);
	}

	kfree(pw);
}
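
/*
 * Worked example (illustrative, assuming 4096-byte data blocks): the default
 * prefetch_cluster of 262144 bytes gives 262144 >> 12 = 64 blocks, so the
 * level-0 range computed above is aligned down to a multiple of 64 hash
 * blocks at the start, rounded up to a 64-block boundary at the end and
 * clamped to the last hash block.
 */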

static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
{
	struct dm_verity_prefetch_work *pw;

	pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
		GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);

	if (!pw)
		return;

	INIT_WORK(&pw->work, verity_prefetch_io);
	pw->v = v;
	pw->block = io->block;
	pw->n_blocks = io->n_blocks;
	queue_work(v->verify_wq, &pw->work);
}

/*
 * Bio map function. It sets up the dm_verity_io structure in the per-bio
 * data and fills it. Then it issues the prefetches and the I/O.
 */
static int verity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_verity *v = ti->private;
	struct dm_verity_io *io;

	bio->bi_bdev = v->data_dev->bdev;
	bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);

	if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
	    ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
		DMERR_LIMIT("unaligned io");
		return -EIO;
	}

	if (bio_end_sector(bio) >>
	    (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
		DMERR_LIMIT("io out of range");
		return -EIO;
	}

	if (bio_data_dir(bio) == WRITE)
		return -EIO;

	io = dm_per_bio_data(bio, ti->per_io_data_size);
	io->v = v;
	io->orig_bi_end_io = bio->bi_end_io;
	io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
	io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;

	bio->bi_end_io = verity_end_io;
	bio->bi_private = io;
	io->iter = bio->bi_iter;

	verity_fec_init_io(io);

	verity_submit_prefetch(v, io);

	generic_make_request(bio);

	return DM_MAPIO_SUBMITTED;
}

/*
 * Status: V (valid) or C (corruption found)
 */
static void verity_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_verity *v = ti->private;
	unsigned args = 0;
	unsigned sz = 0;
	unsigned x;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%c", v->hash_failed ? 'C' : 'V');
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("%u %s %s %u %u %llu %llu %s ",
			v->version,
			v->data_dev->name,
			v->hash_dev->name,
			1 << v->data_dev_block_bits,
			1 << v->hash_dev_block_bits,
			(unsigned long long)v->data_blocks,
			(unsigned long long)v->hash_start,
			v->alg_name
			);
		for (x = 0; x < v->digest_size; x++)
			DMEMIT("%02x", v->root_digest[x]);
		DMEMIT(" ");
		if (!v->salt_size)
			DMEMIT("-");
		else
			for (x = 0; x < v->salt_size; x++)
				DMEMIT("%02x", v->salt[x]);
		if (v->mode != DM_VERITY_MODE_EIO)
			args++;
		if (verity_fec_is_enabled(v))
			args += DM_VERITY_OPTS_FEC;
		if (v->zero_digest)
			args++;
		if (!args)
			return;
		DMEMIT(" %u", args);
		if (v->mode != DM_VERITY_MODE_EIO) {
			DMEMIT(" ");
			switch (v->mode) {
			case DM_VERITY_MODE_LOGGING:
				DMEMIT(DM_VERITY_OPT_LOGGING);
				break;
			case DM_VERITY_MODE_RESTART:
				DMEMIT(DM_VERITY_OPT_RESTART);
				break;
			default:
				BUG();
			}
		}
		if (v->zero_digest)
			DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
		sz = verity_fec_status_table(v, sz, result, maxlen);
		break;
	}
}

static int verity_prepare_ioctl(struct dm_target *ti,
		struct block_device **bdev, fmode_t *mode)
{
	struct dm_verity *v = ti->private;

	*bdev = v->data_dev->bdev;

	if (v->data_start ||
	    ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return 0;
}

static int verity_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_verity *v = ti->private;

	return fn(ti, v->data_dev, v->data_start, ti->len, data);
}

static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_verity *v = ti->private;

	if (limits->logical_block_size < 1 << v->data_dev_block_bits)
		limits->logical_block_size = 1 << v->data_dev_block_bits;

	if (limits->physical_block_size < 1 << v->data_dev_block_bits)
		limits->physical_block_size = 1 << v->data_dev_block_bits;

	blk_limits_io_min(limits, limits->logical_block_size);
}

static void verity_dtr(struct dm_target *ti)
{
	struct dm_verity *v = ti->private;

	if (v->verify_wq)
		destroy_workqueue(v->verify_wq);

	if (v->bufio)
		dm_bufio_client_destroy(v->bufio);

	kfree(v->salt);
	kfree(v->root_digest);
	kfree(v->zero_digest);

	if (v->tfm)
		crypto_free_shash(v->tfm);

	kfree(v->alg_name);

	if (v->hash_dev)
		dm_put_device(ti, v->hash_dev);

	if (v->data_dev)
		dm_put_device(ti, v->data_dev);

	verity_fec_dtr(v);

	kfree(v);
}

static int verity_alloc_zero_digest(struct dm_verity *v)
{
	int r = -ENOMEM;
	struct shash_desc *desc;
	u8 *zero_data;

	v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL);

	if (!v->zero_digest)
		return r;

	desc = kmalloc(v->shash_descsize, GFP_KERNEL);

	if (!desc)
		return r; /* verity_dtr will free zero_digest */

	zero_data = kzalloc(1 << v->data_dev_block_bits, GFP_KERNEL);

	if (!zero_data)
		goto out;

	r = verity_hash(v, desc, zero_data, 1 << v->data_dev_block_bits,
			v->zero_digest);

out:
	kfree(desc);
	kfree(zero_data);

	return r;
}

static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v)
{
	int r;
	unsigned argc;
	struct dm_target *ti = v->ti;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING)) {
			v->mode = DM_VERITY_MODE_LOGGING;
			continue;

		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART)) {
			v->mode = DM_VERITY_MODE_RESTART;
			continue;

		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
			r = verity_alloc_zero_digest(v);
			if (r) {
				ti->error = "Cannot allocate zero digest";
				return r;
			}
			continue;

		} else if (verity_is_fec_opt_arg(arg_name)) {
			r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
			if (r)
				return r;
			continue;
		}

		ti->error = "Unrecognized verity feature request";
		return -EINVAL;
	} while (argc && !r);

	return r;
}

/*
 * Target parameters:
 *	<version>	The current format is version 1.
 *			Version 0 is compatible with original Chromium OS releases.
 *	<data device>
 *	<hash device>
 *	<data block size>
 *	<hash block size>
 *	<the number of data blocks>
 *	<hash start block>
 *	<algorithm>
 *	<digest>
 *	<salt>		Hex string or "-" if no salt.
 */
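
/*
 * An illustrative table line built from the parameters above (all values,
 * including the digest and salt placeholders, are examples only):
 *
 *	0 409600 verity 1 /dev/sda1 /dev/sda2 4096 4096 51200 1 sha256 <digest> <salt>
 *
 * 51200 data blocks of 4096 bytes correspond to 409600 512-byte sectors.
 */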
static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_verity *v;
	struct dm_arg_set as;
	unsigned int num;
	unsigned long long num_ll;
	int r;
	int i;
	sector_t hash_position;
	char dummy;

	v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL);
	if (!v) {
		ti->error = "Cannot allocate verity structure";
		return -ENOMEM;
	}
	ti->private = v;
	v->ti = ti;

	r = verity_fec_ctr_alloc(v);
	if (r)
		goto bad;

	if ((dm_table_get_mode(ti->table) & ~FMODE_READ)) {
		ti->error = "Device must be readonly";
		r = -EINVAL;
		goto bad;
	}

	if (argc < 10) {
		ti->error = "Not enough arguments";
		r = -EINVAL;
		goto bad;
	}

	if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
	    num > 1) {
		ti->error = "Invalid version";
		r = -EINVAL;
		goto bad;
	}
	v->version = num;

	r = dm_get_device(ti, argv[1], FMODE_READ, &v->data_dev);
	if (r) {
		ti->error = "Data device lookup failed";
		goto bad;
	}

	r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev);
	if (r) {
		ti->error = "Hash device lookup failed";
		goto bad;
	}

	if (sscanf(argv[3], "%u%c", &num, &dummy) != 1 ||
	    !num || (num & (num - 1)) ||
	    num < bdev_logical_block_size(v->data_dev->bdev) ||
	    num > PAGE_SIZE) {
		ti->error = "Invalid data device block size";
		r = -EINVAL;
		goto bad;
	}
	v->data_dev_block_bits = __ffs(num);

	if (sscanf(argv[4], "%u%c", &num, &dummy) != 1 ||
	    !num || (num & (num - 1)) ||
	    num < bdev_logical_block_size(v->hash_dev->bdev) ||
	    num > INT_MAX) {
		ti->error = "Invalid hash device block size";
		r = -EINVAL;
		goto bad;
	}
	v->hash_dev_block_bits = __ffs(num);

	if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
	    (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
	    >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
		ti->error = "Invalid data blocks";
		r = -EINVAL;
		goto bad;
	}
	v->data_blocks = num_ll;

	if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {
		ti->error = "Data device is too small";
		r = -EINVAL;
		goto bad;
	}

	if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
	    (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
	    >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
		ti->error = "Invalid hash start";
		r = -EINVAL;
		goto bad;
	}
	v->hash_start = num_ll;

	v->alg_name = kstrdup(argv[7], GFP_KERNEL);
	if (!v->alg_name) {
		ti->error = "Cannot allocate algorithm name";
		r = -ENOMEM;
		goto bad;
	}

	v->tfm = crypto_alloc_shash(v->alg_name, 0, 0);
	if (IS_ERR(v->tfm)) {
		ti->error = "Cannot initialize hash function";
		r = PTR_ERR(v->tfm);
		v->tfm = NULL;
		goto bad;
	}
	v->digest_size = crypto_shash_digestsize(v->tfm);
	if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
		ti->error = "Digest size too big";
		r = -EINVAL;
		goto bad;
	}
	v->shash_descsize =
		sizeof(struct shash_desc) + crypto_shash_descsize(v->tfm);

	v->root_digest = kmalloc(v->digest_size, GFP_KERNEL);
	if (!v->root_digest) {
		ti->error = "Cannot allocate root digest";
		r = -ENOMEM;
		goto bad;
	}
	if (strlen(argv[8]) != v->digest_size * 2 ||
	    hex2bin(v->root_digest, argv[8], v->digest_size)) {
		ti->error = "Invalid root digest";
		r = -EINVAL;
		goto bad;
	}

	if (strcmp(argv[9], "-")) {
		v->salt_size = strlen(argv[9]) / 2;
		v->salt = kmalloc(v->salt_size, GFP_KERNEL);
		if (!v->salt) {
			ti->error = "Cannot allocate salt";
			r = -ENOMEM;
			goto bad;
		}
		if (strlen(argv[9]) != v->salt_size * 2 ||
		    hex2bin(v->salt, argv[9], v->salt_size)) {
			ti->error = "Invalid salt";
			r = -EINVAL;
			goto bad;
		}
	}

	argv += 10;
	argc -= 10;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		r = verity_parse_opt_args(&as, v);
		if (r < 0)
			goto bad;
	}

	v->hash_per_block_bits =
		__fls((1 << v->hash_dev_block_bits) / v->digest_size);

	v->levels = 0;
	if (v->data_blocks)
		while (v->hash_per_block_bits * v->levels < 64 &&
		       (unsigned long long)(v->data_blocks - 1) >>
		       (v->hash_per_block_bits * v->levels))
			v->levels++;
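	/*
	 * For example (illustrative), with 2^20 data blocks and
	 * hash_per_block_bits = 7 (128 hashes per block) the loop above
	 * yields v->levels = 3, since 128^2 < 2^20 <= 128^3.
	 */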

	if (v->levels > DM_VERITY_MAX_LEVELS) {
		ti->error = "Too many tree levels";
		r = -E2BIG;
		goto bad;
	}

	hash_position = v->hash_start;
	for (i = v->levels - 1; i >= 0; i--) {
		sector_t s;
		v->hash_level_block[i] = hash_position;
		s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
					>> ((i + 1) * v->hash_per_block_bits);
		if (hash_position + s < hash_position) {
			ti->error = "Hash device offset overflow";
			r = -E2BIG;
			goto bad;
		}
		hash_position += s;
	}
	v->hash_blocks = hash_position;
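	/*
	 * Worked example (illustrative): with 2^20 data blocks and 128 hashes
	 * per block, the loop above lays out one level-2 block, 64 level-1
	 * blocks and 8192 level-0 blocks, so v->hash_blocks ends up at
	 * v->hash_start + 1 + 64 + 8192.
	 */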

	v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
		1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
		dm_bufio_alloc_callback, NULL);
	if (IS_ERR(v->bufio)) {
		ti->error = "Cannot initialize dm-bufio";
		r = PTR_ERR(v->bufio);
		v->bufio = NULL;
		goto bad;
	}

	if (dm_bufio_get_device_size(v->bufio) < v->hash_blocks) {
		ti->error = "Hash device is too small";
		r = -E2BIG;
		goto bad;
	}

	/* WQ_UNBOUND greatly improves performance when running on ramdisk */
	v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
	if (!v->verify_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ti->per_io_data_size = sizeof(struct dm_verity_io) +
				v->shash_descsize + v->digest_size * 2;

	r = verity_fec_ctr(v);
	if (r)
		goto bad;

	ti->per_io_data_size = roundup(ti->per_io_data_size,
				       __alignof__(struct dm_verity_io));

	return 0;

bad:
	verity_dtr(ti);

	return r;
}

static struct target_type verity_target = {
	.name		= "verity",
	.version	= {1, 3, 0},
	.module		= THIS_MODULE,
	.ctr		= verity_ctr,
	.dtr		= verity_dtr,
	.map		= verity_map,
	.status		= verity_status,
	.prepare_ioctl	= verity_prepare_ioctl,
	.iterate_devices = verity_iterate_devices,
	.io_hints	= verity_io_hints,
};

static int __init dm_verity_init(void)
{
	int r;

	r = dm_register_target(&verity_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_verity_exit(void)
{
	dm_unregister_target(&verity_target);
}

module_init(dm_verity_init);
module_exit(dm_verity_exit);

MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
MODULE_DESCRIPTION(DM_NAME " target for transparent disk integrity checking");
MODULE_LICENSE("GPL");