1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2012 Red Hat, Inc.
4 *
5 * Author: Mikulas Patocka <mpatocka@redhat.com>
6 *
7 * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
8 *
9 * In the file "/sys/module/dm_verity/parameters/prefetch_cluster" you can set
10 * default prefetch value. Data are read in "prefetch_cluster" chunks from the
11 * hash device. Setting this greatly improves performance when data and hash
12 * are on the same disk on different partitions on devices with poor random
13 * access behavior.
14 */
15
16 #include "dm-verity.h"
17 #include "dm-verity-fec.h"
18 #include "dm-verity-verify-sig.h"
19 #include "dm-audit.h"
20 #include <linux/hex.h>
21 #include <linux/module.h>
22 #include <linux/reboot.h>
23 #include <linux/string.h>
24 #include <linux/jump_label.h>
25 #include <linux/security.h>
26
27 #define DM_MSG_PREFIX "verity"
28
29 #define DM_VERITY_ENV_LENGTH 42
30 #define DM_VERITY_ENV_VAR_NAME "DM_VERITY_ERR_BLOCK_NR"
31
32 #define DM_VERITY_DEFAULT_PREFETCH_SIZE 262144
33 #define DM_VERITY_USE_BH_DEFAULT_BYTES 8192
34
35 #define DM_VERITY_MAX_CORRUPTED_ERRS 100
36
37 #define DM_VERITY_OPT_LOGGING "ignore_corruption"
38 #define DM_VERITY_OPT_RESTART "restart_on_corruption"
39 #define DM_VERITY_OPT_PANIC "panic_on_corruption"
40 #define DM_VERITY_OPT_ERROR_RESTART "restart_on_error"
41 #define DM_VERITY_OPT_ERROR_PANIC "panic_on_error"
42 #define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
43 #define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
44 #define DM_VERITY_OPT_TASKLET_VERIFY "try_verify_in_tasklet"
45
46 #define DM_VERITY_OPTS_MAX (5 + DM_VERITY_OPTS_FEC + \
47 DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
48
49 static unsigned int dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
50
51 module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, 0644);
52
53 static unsigned int dm_verity_use_bh_bytes[4] = {
54 DM_VERITY_USE_BH_DEFAULT_BYTES, // IOPRIO_CLASS_NONE
55 DM_VERITY_USE_BH_DEFAULT_BYTES, // IOPRIO_CLASS_RT
56 DM_VERITY_USE_BH_DEFAULT_BYTES, // IOPRIO_CLASS_BE
57 0 // IOPRIO_CLASS_IDLE
58 };
59
60 module_param_array_named(use_bh_bytes, dm_verity_use_bh_bytes, uint, NULL, 0644);
61
62 static DEFINE_STATIC_KEY_FALSE(use_bh_wq_enabled);
63
/* Deferred hash-prefetch request, queued on v->verify_wq. */
struct dm_verity_prefetch_work {
	struct work_struct work;
	struct dm_verity *v;
	unsigned short ioprio;	/* I/O priority propagated to bufio prefetch */
	sector_t block;		/* first data block whose hashes to prefetch */
	unsigned int n_blocks;	/* number of data blocks covered */
};
71
72 /*
73 * Auxiliary structure appended to each dm-bufio buffer. If the value
74 * hash_verified is nonzero, hash of the block has been verified.
75 *
76 * The variable hash_verified is set to 0 when allocating the buffer, then
77 * it can be changed to 1 and it is never reset to 0 again.
78 *
79 * There is no lock around this value, a race condition can at worst cause
80 * that multiple processes verify the hash of the same buffer simultaneously
81 * and write 1 to hash_verified simultaneously.
82 * This condition is harmless, so we don't need locking.
83 */
struct buffer_aux {
	int hash_verified;	/* nonzero once this hash block has been verified */
};
87
88 /*
89 * Initialize struct buffer_aux for a freshly created buffer.
90 */
static void dm_bufio_alloc_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);

	/* New buffer: its hash has not been verified yet (see struct buffer_aux). */
	aux->hash_verified = 0;
}
97
98 /*
99 * Translate input sector number to the sector number on the target device.
100 */
static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector)
{
	/* Remap a bio sector relative to the start of this dm target. */
	return dm_target_offset(v->ti, bi_sector);
}
105
106 /*
107 * Return hash position of a specified block at a specified tree level
108 * (0 is the lowest level).
109 * The lowest "hash_per_block_bits"-bits of the result denote hash position
110 * inside a hash block. The remaining bits denote location of the hash block.
111 */
static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
					 int level)
{
	/*
	 * Each tree level groups 2^hash_per_block_bits entries per hash
	 * block, so shifting by level * hash_per_block_bits yields the
	 * combined (hash block, in-block index) position at that level.
	 */
	return block >> (level * v->hash_per_block_bits);
}
117
/*
 * Hash one block of data (plus the salt) into @digest.
 *
 * Returns 0 on success or a negative error from the crypto API.
 */
int verity_hash(struct dm_verity *v, struct dm_verity_io *io,
		const u8 *data, size_t len, u8 *digest)
{
	struct shash_desc *desc;
	int r;

	if (likely(v->use_sha256_lib)) {
		struct sha256_ctx *ctx = &io->hash_ctx.sha256;

		/*
		 * Fast path using SHA-256 library. This is enabled only for
		 * verity version 1, where the salt is at the beginning.
		 */
		*ctx = *v->initial_hashstate.sha256;
		sha256_update(ctx, data, len);
		sha256_final(ctx, digest);
		return 0;
	}

	/* Generic path through the crypto shash API. */
	desc = &io->hash_ctx.shash;
	desc->tfm = v->shash_tfm;
	if (unlikely(v->initial_hashstate.shash == NULL)) {
		/* Version 0: salt at end */
		r = crypto_shash_init(desc) ?:
		    crypto_shash_update(desc, data, len) ?:
		    crypto_shash_update(desc, v->salt, v->salt_size) ?:
		    crypto_shash_final(desc, digest);
	} else {
		/* Version 1: salt at beginning */
		r = crypto_shash_import(desc, v->initial_hashstate.shash) ?:
		    crypto_shash_finup(desc, data, len, digest);
	}
	if (unlikely(r))
		DMERR("Error hashing block: %d", r);
	return r;
}
154
/*
 * Locate the hash for @block at @level: the hash block on the hash
 * device goes to *hash_block, and (optionally) the byte offset of the
 * hash inside that block goes to *offset.
 */
static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
				 sector_t *hash_block, unsigned int *offset)
{
	sector_t pos = verity_position_at_level(v, block, level);

	*hash_block = v->hash_level_block[level] + (pos >> v->hash_per_block_bits);

	if (offset) {
		unsigned int slot = pos & ((1 << v->hash_per_block_bits) - 1);

		if (v->version)
			/* v1: hashes stored at a power-of-two stride */
			*offset = slot << (v->hash_dev_block_bits -
					   v->hash_per_block_bits);
		else
			/* v0: hashes packed back to back */
			*offset = slot * v->digest_size;
	}
}
172
173 /*
174 * Handle verification errors.
175 */
static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
			     unsigned long long block)
{
	char verity_env[DM_VERITY_ENV_LENGTH];
	char *envp[] = { verity_env, NULL };
	const char *type_str = "";
	struct mapped_device *md = dm_table_get_md(v->ti->table);

	/* Corruption should be visible in device status in all modes */
	v->hash_failed = true;

	/* Stop logging/notifying after DM_VERITY_MAX_CORRUPTED_ERRS errors. */
	if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
		goto out;

	v->corrupted_errs++;

	switch (type) {
	case DM_VERITY_BLOCK_TYPE_DATA:
		type_str = "data";
		break;
	case DM_VERITY_BLOCK_TYPE_METADATA:
		type_str = "metadata";
		break;
	default:
		BUG();
	}

	DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
		    type_str, block);

	if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS) {
		DMERR("%s: reached maximum errors", v->data_dev->name);
		dm_audit_log_target(DM_MSG_PREFIX, "max-corrupted-errors", v->ti, 0);
	}

	/* Notify userspace of the corrupted block via a change uevent. */
	snprintf(verity_env, DM_VERITY_ENV_LENGTH, "%s=%d,%llu",
		 DM_VERITY_ENV_VAR_NAME, type, block);

	kobject_uevent_env(&disk_to_dev(dm_disk(md))->kobj, KOBJ_CHANGE, envp);

out:
	/* Return value: 0 = ignore the error, nonzero = fail the I/O. */
	if (v->mode == DM_VERITY_MODE_LOGGING)
		return 0;

	if (v->mode == DM_VERITY_MODE_RESTART)
		kernel_restart("dm-verity device corrupted");

	if (v->mode == DM_VERITY_MODE_PANIC)
		panic("dm-verity device corrupted");

	return 1;
}
228
229 /*
230 * Verify hash of a metadata block pertaining to the specified data block
231 * ("block" argument) at a specified level ("level" argument).
232 *
233 * On successful return, want_digest contains the hash value for a lower tree
234 * level or for the data block (if we're at the lowest level).
235 *
236 * If "skip_unverified" is true, unverified buffer is skipped and 1 is returned.
237 * If "skip_unverified" is false, unverified buffer is hashed and verified
238 * against current value of want_digest.
239 */
static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
			       sector_t block, int level, bool skip_unverified,
			       u8 *want_digest)
{
	struct dm_buffer *buf;
	struct buffer_aux *aux;
	u8 *data;
	int r;
	sector_t hash_block;
	unsigned int offset;
	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);

	verity_hash_at_level(v, block, level, &hash_block, &offset);

	if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
		/* Softirq context: only a cache lookup is allowed, no I/O. */
		data = dm_bufio_get(v->bufio, hash_block, &buf);
		if (IS_ERR_OR_NULL(data)) {
			/*
			 * In softirq and the hash was not in the bufio cache.
			 * Return early and resume execution from a kworker to
			 * read the hash from disk.
			 */
			return -EAGAIN;
		}
	} else {
		data = dm_bufio_read_with_ioprio(v->bufio, hash_block,
						 &buf, bio->bi_ioprio);
	}

	if (IS_ERR(data)) {
		/* The read of the hash block itself failed. */
		if (skip_unverified)
			return 1;
		r = PTR_ERR(data);
		/* Try to reconstruct the hash block with FEC. */
		data = dm_bufio_new(v->bufio, hash_block, &buf);
		if (IS_ERR(data))
			return r;
		if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA,
				      want_digest, hash_block, data) == 0) {
			aux = dm_bufio_get_aux_data(buf);
			aux->hash_verified = 1;
			goto release_ok;
		} else {
			dm_bufio_release(buf);
			dm_bufio_forget(v->bufio, hash_block);
			return r;
		}
	}

	aux = dm_bufio_get_aux_data(buf);

	if (!aux->hash_verified) {
		if (skip_unverified) {
			r = 1;
			goto release_ret_r;
		}

		r = verity_hash(v, io, data, 1 << v->hash_dev_block_bits,
				io->tmp_digest);
		if (unlikely(r < 0))
			goto release_ret_r;

		if (likely(memcmp(io->tmp_digest, want_digest,
				  v->digest_size) == 0))
			aux->hash_verified = 1;
		else if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
			/*
			 * Error handling code (FEC included) cannot be run in a
			 * softirq since it may sleep, so fallback to a kworker.
			 */
			r = -EAGAIN;
			goto release_ret_r;
		} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA,
					     want_digest, hash_block, data) == 0)
			aux->hash_verified = 1;
		else if (verity_handle_err(v,
					   DM_VERITY_BLOCK_TYPE_METADATA,
					   hash_block)) {
			io->had_mismatch = true;
			/*
			 * Reuse the bio computed at function entry; the
			 * previous code shadowed it with a redundant local
			 * and recomputed dm_bio_from_per_bio_data().
			 */
			dm_audit_log_bio(DM_MSG_PREFIX, "verify-metadata", bio,
					 block, 0);
			r = -EIO;
			goto release_ret_r;
		}
	}

release_ok:
	/* Hash block verified: copy out the wanted digest for the next level. */
	data += offset;
	memcpy(want_digest, data, v->digest_size);
	r = 0;

release_ret_r:
	dm_bufio_release(buf);
	return r;
}
336
337 /*
338 * Find a hash for a given block, write it to digest and verify the integrity
339 * of the hash tree if necessary.
340 */
int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
			  sector_t block, u8 *digest, bool *is_zero)
{
	int r = 0, i;

	if (likely(v->levels)) {
		/*
		 * First, we try to get the requested hash for
		 * the current block. If the hash block itself is
		 * verified, zero is returned. If it isn't, this
		 * function returns 1 and we fall back to whole
		 * chain verification.
		 */
		r = verity_verify_level(v, io, block, 0, true, digest);
		if (likely(r <= 0))
			goto out;
	}

	/* Fall back: walk the tree from the root down to level 0. */
	memcpy(digest, v->root_digest, v->digest_size);

	for (i = v->levels - 1; i >= 0; i--) {
		r = verity_verify_level(v, io, block, i, false, digest);
		if (unlikely(r))
			goto out;
	}
out:
	/* Flag blocks whose expected hash equals the all-zeroes-block digest. */
	if (!r && v->zero_digest)
		*is_zero = !memcmp(v->zero_digest, digest, v->digest_size);
	else
		*is_zero = false;

	return r;
}
374
/*
 * Re-read @cur_block synchronously into a private page and re-verify it.
 * On success the freshly read (and verified) data is copied to @dest.
 * Used to distinguish transient read corruption from real mismatches.
 */
static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
				   const u8 *want_digest, sector_t cur_block,
				   u8 *dest)
{
	struct page *page;
	void *buffer;
	int r;
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	page = mempool_alloc(&v->recheck_pool, GFP_NOIO);
	buffer = page_to_virt(page);

	/* Synchronous read of one data block through dm-io. */
	io_req.bi_opf = REQ_OP_READ;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = buffer;
	io_req.notify.fn = NULL;
	io_req.client = v->io;
	io_loc.bdev = v->data_dev->bdev;
	io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
	io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
	if (unlikely(r))
		goto free_ret;

	r = verity_hash(v, io, buffer, 1 << v->data_dev_block_bits,
			io->tmp_digest);
	if (unlikely(r))
		goto free_ret;

	if (memcmp(io->tmp_digest, want_digest, v->digest_size)) {
		r = -EIO;
		goto free_ret;
	}

	memcpy(dest, buffer, 1 << v->data_dev_block_bits);
	r = 0;
free_ret:
	mempool_free(page, &v->recheck_pool);

	return r;
}
417
/*
 * Recovery path for a data block whose hash did not match:
 * re-read + re-verify, then FEC, then the configured error policy.
 * Returns 0 if the block was recovered or the error is being ignored,
 * -EAGAIN to retry from a kworker, -EIO on unrecoverable mismatch.
 */
static int verity_handle_data_hash_mismatch(struct dm_verity *v,
					    struct dm_verity_io *io,
					    struct bio *bio,
					    struct pending_block *block)
{
	const u8 *want_digest = block->want_digest;
	sector_t blkno = block->blkno;
	u8 *data = block->data;

	if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
		/*
		 * Error handling code (FEC included) cannot be run in a
		 * softirq since it may sleep, so fallback to a kworker.
		 */
		return -EAGAIN;
	}
	if (verity_recheck(v, io, want_digest, blkno, data) == 0) {
		/* Re-read matched: the first read was transiently corrupted. */
		if (v->validated_blocks)
			set_bit(blkno, v->validated_blocks);
		return 0;
	}
	if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA, want_digest,
			      blkno, data) == 0)
		return 0;
	if (bio->bi_status)
		return -EIO; /* Error correction failed; Just return error */

	if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA, blkno)) {
		io->had_mismatch = true;
		dm_audit_log_bio(DM_MSG_PREFIX, "verify-data", bio, blkno, 0);
		return -EIO;
	}
	return 0;
}
452
verity_clear_pending_blocks(struct dm_verity_io * io)453 static void verity_clear_pending_blocks(struct dm_verity_io *io)
454 {
455 int i;
456
457 for (i = io->num_pending - 1; i >= 0; i--) {
458 kunmap_local(io->pending_blocks[i].data);
459 io->pending_blocks[i].data = NULL;
460 }
461 io->num_pending = 0;
462 }
463
/*
 * Hash and verify all currently pending data blocks, then clear the
 * pending list.  Returns 0 on success or a negative error.
 */
static int verity_verify_pending_blocks(struct dm_verity *v,
					struct dm_verity_io *io,
					struct bio *bio)
{
	const unsigned int block_size = 1 << v->data_dev_block_bits;
	int i, r;

	if (io->num_pending == 2) {
		/* num_pending == 2 implies that the algorithm is SHA-256 */
		sha256_finup_2x(v->initial_hashstate.sha256,
				io->pending_blocks[0].data,
				io->pending_blocks[1].data, block_size,
				io->pending_blocks[0].real_digest,
				io->pending_blocks[1].real_digest);
	} else {
		/* Generic path: hash each pending block individually. */
		for (i = 0; i < io->num_pending; i++) {
			r = verity_hash(v, io, io->pending_blocks[i].data,
					block_size,
					io->pending_blocks[i].real_digest);
			if (unlikely(r))
				return r;
		}
	}

	for (i = 0; i < io->num_pending; i++) {
		struct pending_block *block = &io->pending_blocks[i];

		if (likely(memcmp(block->real_digest, block->want_digest,
				  v->digest_size) == 0)) {
			/* Remember verified blocks for check_at_most_once. */
			if (v->validated_blocks)
				set_bit(block->blkno, v->validated_blocks);
		} else {
			r = verity_handle_data_hash_mismatch(v, io, bio, block);
			if (unlikely(r))
				return r;
		}
	}
	verity_clear_pending_blocks(io);
	return 0;
}
504
505 /*
506 * Verify one "dm_verity_io" structure.
507 */
static int verity_verify_io(struct dm_verity_io *io)
{
	struct dm_verity *v = io->v;
	const unsigned int block_size = 1 << v->data_dev_block_bits;
	/* With sha256_finup_2x, two blocks can be hashed interleaved. */
	const int max_pending = v->use_sha256_finup_2x ? 2 : 1;
	struct bvec_iter iter_copy;
	struct bvec_iter *iter;
	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
	unsigned int b;
	int r;

	io->num_pending = 0;

	if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
		/*
		 * Copy the iterator in case we need to restart verification in
		 * a kworker.
		 */
		iter_copy = io->iter;
		iter = &iter_copy;
	} else
		iter = &io->iter;

	for (b = 0; b < io->n_blocks;
	     b++, bio_advance_iter_single(bio, iter, block_size)) {
		sector_t blkno = io->block + b;
		struct pending_block *block;
		bool is_zero;
		struct bio_vec bv;
		void *data;

		/* Skip blocks already verified under check_at_most_once. */
		if (v->validated_blocks && bio->bi_status == BLK_STS_OK &&
		    likely(test_bit(blkno, v->validated_blocks)))
			continue;

		block = &io->pending_blocks[io->num_pending];

		r = verity_hash_for_block(v, io, blkno, block->want_digest,
					  &is_zero);
		if (unlikely(r < 0))
			goto error;

		bv = bio_iter_iovec(bio, *iter);
		if (unlikely(bv.bv_len < block_size)) {
			/*
			 * Data block spans pages. This should not happen,
			 * since dm-verity sets dma_alignment to the data block
			 * size minus 1, and dm-verity also doesn't allow the
			 * data block size to be greater than PAGE_SIZE.
			 */
			DMERR_LIMIT("unaligned io (data block spans pages)");
			r = -EIO;
			goto error;
		}

		data = bvec_kmap_local(&bv);

		if (is_zero) {
			/*
			 * If we expect a zero block, don't validate, just
			 * return zeros.
			 */
			memset(data, 0, block_size);
			kunmap_local(data);
			continue;
		}
		block->data = data;
		block->blkno = blkno;
		/* Flush the pending batch once it is full. */
		if (++io->num_pending == max_pending) {
			r = verity_verify_pending_blocks(v, io, bio);
			if (unlikely(r))
				goto error;
		}
	}

	/* Verify any partial batch left over at the end. */
	if (io->num_pending) {
		r = verity_verify_pending_blocks(v, io, bio);
		if (unlikely(r))
			goto error;
	}

	return 0;

error:
	verity_clear_pending_blocks(io);
	return r;
}
595
596 /*
597 * Skip verity work in response to I/O error when system is shutting down.
598 */
verity_is_system_shutting_down(void)599 static inline bool verity_is_system_shutting_down(void)
600 {
601 return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
602 || system_state == SYSTEM_RESTART;
603 }
604
/* Work item body: restart the machine after an unrecoverable I/O error. */
static void restart_io_error(struct work_struct *w)
{
	kernel_restart("dm-verity device has I/O error");
}
609
610 /*
611 * End one "io" structure with a given error.
612 */
static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
{
	struct dm_verity *v = io->v;
	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);

	/* Restore the end_io handler saved in verity_map(). */
	bio->bi_end_io = io->orig_bi_end_io;
	bio->bi_status = status;

	verity_fec_finish_io(io);

	/*
	 * Apply the error_mode policy only for genuine I/O errors:
	 * not for readahead, not for verification mismatches (those are
	 * governed by v->mode), and not while the system is shutting down.
	 */
	if (unlikely(status != BLK_STS_OK) &&
	    unlikely(!(bio->bi_opf & REQ_RAHEAD)) &&
	    !io->had_mismatch &&
	    !verity_is_system_shutting_down()) {
		if (v->error_mode == DM_VERITY_MODE_PANIC) {
			panic("dm-verity device has I/O error");
		}
		if (v->error_mode == DM_VERITY_MODE_RESTART) {
			/* Restart must not run here; defer to the workqueue. */
			static DECLARE_WORK(restart_work, restart_io_error);
			queue_work(v->verify_wq, &restart_work);
			/*
			 * We deliberately don't call bio_endio here, because
			 * the machine will be restarted anyway.
			 */
			return;
		}
	}

	bio_endio(bio);
}
643
verity_work(struct work_struct * w)644 static void verity_work(struct work_struct *w)
645 {
646 struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
647
648 io->in_bh = false;
649
650 verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
651 }
652
/*
 * Bottom-half (softirq) verification attempt.  If verification cannot
 * complete without sleeping (-EAGAIN) or without memory (-ENOMEM),
 * re-queue the io to the process-context workqueue.
 */
static void verity_bh_work(struct work_struct *w)
{
	struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
	int err;

	io->in_bh = true;
	err = verity_verify_io(io);
	if (err == -EAGAIN || err == -ENOMEM) {
		/* fallback to retrying in a kworker */
		INIT_WORK(&io->work, verity_work);
		queue_work(io->v->verify_wq, &io->work);
		return;
	}

	verity_finish_io(io, errno_to_blk_status(err));
}
669
verity_use_bh(unsigned int bytes,unsigned short ioprio)670 static inline bool verity_use_bh(unsigned int bytes, unsigned short ioprio)
671 {
672 return ioprio <= IOPRIO_CLASS_IDLE &&
673 bytes <= READ_ONCE(dm_verity_use_bh_bytes[ioprio]) &&
674 !need_resched();
675 }
676
/* bio end_io: dispatch verification to BH context or the workqueue. */
static void verity_end_io(struct bio *bio)
{
	struct dm_verity_io *io = bio->bi_private;
	unsigned short ioprio = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
	unsigned int bytes = io->n_blocks << io->v->data_dev_block_bits;

	/*
	 * Fail immediately on read errors that FEC cannot help with:
	 * FEC disabled, system shutting down, or readahead (best effort).
	 */
	if (bio->bi_status &&
	    (!verity_fec_is_enabled(io->v) ||
	     verity_is_system_shutting_down() ||
	     (bio->bi_opf & REQ_RAHEAD))) {
		verity_finish_io(io, bio->bi_status);
		return;
	}

	if (static_branch_unlikely(&use_bh_wq_enabled) && io->v->use_bh_wq &&
	    verity_use_bh(bytes, ioprio)) {
		if (in_hardirq() || irqs_disabled()) {
			/* Cannot verify in hardirq; defer to the BH workqueue. */
			INIT_WORK(&io->work, verity_bh_work);
			queue_work(system_bh_wq, &io->work);
		} else {
			verity_bh_work(&io->work);
		}
	} else {
		INIT_WORK(&io->work, verity_work);
		queue_work(io->v->verify_wq, &io->work);
	}
}
704
705 /*
706 * Prefetch buffers for the specified io.
707 * The root buffer is not prefetched, it is assumed that it will be cached
708 * all the time.
709 */
static void verity_prefetch_io(struct work_struct *work)
{
	struct dm_verity_prefetch_work *pw =
		container_of(work, struct dm_verity_prefetch_work, work);
	struct dm_verity *v = pw->v;
	int i;

	/* Walk levels top-down, skipping the root (levels - 1). */
	for (i = v->levels - 2; i >= 0; i--) {
		sector_t hash_block_start;
		sector_t hash_block_end;

		verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
		verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);

		if (!i) {
			/*
			 * At the lowest level, widen the range to a whole
			 * prefetch cluster (rounded down to a power of two).
			 */
			unsigned int cluster = READ_ONCE(dm_verity_prefetch_cluster);

			cluster >>= v->data_dev_block_bits;
			if (unlikely(!cluster))
				goto no_prefetch_cluster;

			if (unlikely(cluster & (cluster - 1)))
				cluster = 1 << __fls(cluster);

			hash_block_start &= ~(sector_t)(cluster - 1);
			hash_block_end |= cluster - 1;
			if (unlikely(hash_block_end >= v->hash_blocks))
				hash_block_end = v->hash_blocks - 1;
		}
no_prefetch_cluster:
		dm_bufio_prefetch_with_ioprio(v->bufio, hash_block_start,
					      hash_block_end - hash_block_start + 1,
					      pw->ioprio);
	}

	kfree(pw);
}
747
/*
 * Queue a best-effort prefetch of the hash blocks needed by @io.
 * Allocation failure is silently ignored; prefetching is an optimization.
 */
static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io,
				   unsigned short ioprio)
{
	sector_t block = io->block;
	unsigned int n_blocks = io->n_blocks;
	struct dm_verity_prefetch_work *pw;

	if (v->validated_blocks) {
		/* Trim already-validated blocks from both ends of the range. */
		while (n_blocks && test_bit(block, v->validated_blocks)) {
			block++;
			n_blocks--;
		}
		while (n_blocks && test_bit(block + n_blocks - 1,
					    v->validated_blocks))
			n_blocks--;
		if (!n_blocks)
			return;
	}

	pw = kmalloc_obj(struct dm_verity_prefetch_work,
			 GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);

	if (!pw)
		return;

	INIT_WORK(&pw->work, verity_prefetch_io);
	pw->v = v;
	pw->block = block;
	pw->n_blocks = n_blocks;
	pw->ioprio = ioprio;
	queue_work(v->verify_wq, &pw->work);
}
780
781 /*
782 * Bio map function. It allocates dm_verity_io structure and bio vector and
783 * fills them. Then it issues prefetches and the I/O.
784 */
static int verity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_verity *v = ti->private;
	struct dm_verity_io *io;

	bio_set_dev(bio, v->data_dev->bdev);
	bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);

	/* Both offset and length must be aligned to the data block size. */
	if (((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
	    ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
		DMERR_LIMIT("unaligned io");
		return DM_MAPIO_KILL;
	}

	if (bio_end_sector(bio) >>
	    (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
		DMERR_LIMIT("io out of range");
		return DM_MAPIO_KILL;
	}

	/* dm-verity is read-only. */
	if (bio_data_dir(bio) == WRITE)
		return DM_MAPIO_KILL;

	io = dm_per_bio_data(bio, ti->per_io_data_size);
	io->v = v;
	io->orig_bi_end_io = bio->bi_end_io;
	io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
	io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
	io->had_mismatch = false;

	/* Intercept completion so the data can be verified before end_io. */
	bio->bi_end_io = verity_end_io;
	bio->bi_private = io;
	io->iter = bio->bi_iter;

	verity_fec_init_io(io);

	verity_submit_prefetch(v, io, bio->bi_ioprio);

	submit_bio_noacct(bio);

	return DM_MAPIO_SUBMITTED;
}
827
static void verity_postsuspend(struct dm_target *ti)
{
	struct dm_verity *v = ti->private;

	/* Drain in-flight verification, then drop cached hash blocks. */
	flush_workqueue(v->verify_wq);
	dm_bufio_client_reset(v->bufio);
}
834
835 /*
836 * Status: V (valid) or C (corruption found)
837 */
static void verity_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct dm_verity *v = ti->private;
	unsigned int args = 0;
	unsigned int sz = 0;
	unsigned int x;

	switch (type) {
	case STATUSTYPE_INFO:
		/* "V" = valid, "C" = corruption detected at least once. */
		DMEMIT("%c", v->hash_failed ? 'C' : 'V');
		if (verity_fec_is_enabled(v))
			DMEMIT(" %lld", atomic64_read(&v->fec->corrected));
		else
			DMEMIT(" -");
		break;
	case STATUSTYPE_TABLE:
		/* Emit the constructor arguments in table-line order. */
		DMEMIT("%u %s %s %u %u %llu %llu %s ",
		       v->version,
		       v->data_dev->name,
		       v->hash_dev->name,
		       1 << v->data_dev_block_bits,
		       1 << v->hash_dev_block_bits,
		       (unsigned long long)v->data_blocks,
		       (unsigned long long)v->hash_start,
		       v->alg_name
		       );
		for (x = 0; x < v->digest_size; x++)
			DMEMIT("%02x", v->root_digest[x]);
		DMEMIT(" ");
		if (!v->salt_size)
			DMEMIT("-");
		else
			for (x = 0; x < v->salt_size; x++)
				DMEMIT("%02x", v->salt[x]);
		/* Count optional args first; they are preceded by their count. */
		if (v->mode != DM_VERITY_MODE_EIO)
			args++;
		if (v->error_mode != DM_VERITY_MODE_EIO)
			args++;
		if (verity_fec_is_enabled(v))
			args += DM_VERITY_OPTS_FEC;
		if (v->zero_digest)
			args++;
		if (v->validated_blocks)
			args++;
		if (v->use_bh_wq)
			args++;
		if (v->signature_key_desc)
			args += DM_VERITY_ROOT_HASH_VERIFICATION_OPTS;
		if (!args)
			return;
		DMEMIT(" %u", args);
		if (v->mode != DM_VERITY_MODE_EIO) {
			DMEMIT(" ");
			switch (v->mode) {
			case DM_VERITY_MODE_LOGGING:
				DMEMIT(DM_VERITY_OPT_LOGGING);
				break;
			case DM_VERITY_MODE_RESTART:
				DMEMIT(DM_VERITY_OPT_RESTART);
				break;
			case DM_VERITY_MODE_PANIC:
				DMEMIT(DM_VERITY_OPT_PANIC);
				break;
			default:
				BUG();
			}
		}
		if (v->error_mode != DM_VERITY_MODE_EIO) {
			DMEMIT(" ");
			switch (v->error_mode) {
			case DM_VERITY_MODE_RESTART:
				DMEMIT(DM_VERITY_OPT_ERROR_RESTART);
				break;
			case DM_VERITY_MODE_PANIC:
				DMEMIT(DM_VERITY_OPT_ERROR_PANIC);
				break;
			default:
				BUG();
			}
		}
		if (v->zero_digest)
			DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
		if (v->validated_blocks)
			DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE);
		if (v->use_bh_wq)
			DMEMIT(" " DM_VERITY_OPT_TASKLET_VERIFY);
		sz = verity_fec_status_table(v, sz, result, maxlen);
		if (v->signature_key_desc)
			DMEMIT(" " DM_VERITY_ROOT_HASH_VERIFICATION_OPT_SIG_KEY
				" %s", v->signature_key_desc);
		break;

	case STATUSTYPE_IMA:
		/* Key=value form consumed by IMA measurement. */
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",hash_failed=%c", v->hash_failed ? 'C' : 'V');
		DMEMIT(",verity_version=%u", v->version);
		DMEMIT(",data_device_name=%s", v->data_dev->name);
		DMEMIT(",hash_device_name=%s", v->hash_dev->name);
		DMEMIT(",verity_algorithm=%s", v->alg_name);

		DMEMIT(",root_digest=");
		for (x = 0; x < v->digest_size; x++)
			DMEMIT("%02x", v->root_digest[x]);

		DMEMIT(",salt=");
		if (!v->salt_size)
			DMEMIT("-");
		else
			for (x = 0; x < v->salt_size; x++)
				DMEMIT("%02x", v->salt[x]);

		DMEMIT(",ignore_zero_blocks=%c", v->zero_digest ? 'y' : 'n');
		DMEMIT(",check_at_most_once=%c", v->validated_blocks ? 'y' : 'n');
		if (v->signature_key_desc)
			DMEMIT(",root_hash_sig_key_desc=%s", v->signature_key_desc);

		if (v->mode != DM_VERITY_MODE_EIO) {
			DMEMIT(",verity_mode=");
			switch (v->mode) {
			case DM_VERITY_MODE_LOGGING:
				DMEMIT(DM_VERITY_OPT_LOGGING);
				break;
			case DM_VERITY_MODE_RESTART:
				DMEMIT(DM_VERITY_OPT_RESTART);
				break;
			case DM_VERITY_MODE_PANIC:
				DMEMIT(DM_VERITY_OPT_PANIC);
				break;
			default:
				DMEMIT("invalid");
			}
		}
		if (v->error_mode != DM_VERITY_MODE_EIO) {
			DMEMIT(",verity_error_mode=");
			switch (v->error_mode) {
			case DM_VERITY_MODE_RESTART:
				DMEMIT(DM_VERITY_OPT_ERROR_RESTART);
				break;
			case DM_VERITY_MODE_PANIC:
				DMEMIT(DM_VERITY_OPT_ERROR_PANIC);
				break;
			default:
				DMEMIT("invalid");
			}
		}
		DMEMIT(";");
		break;
	}
}
988
/*
 * Point ioctls at the underlying data device.  Return nonzero when this
 * target does not cover the whole device, so dm core restricts forwarding.
 */
static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
				unsigned int cmd, unsigned long arg,
				bool *forward)
{
	struct dm_verity *v = ti->private;
	struct block_device *data_bdev = v->data_dev->bdev;

	*bdev = data_bdev;

	return ti->len == bdev_nr_sectors(data_bdev) ? 0 : 1;
}
1001
static int verity_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_verity *v = ti->private;

	/* Only the data device is visible to dm core iteration. */
	return fn(ti, v->data_dev, 0, ti->len, data);
}
1009
verity_io_hints(struct dm_target * ti,struct queue_limits * limits)1010 static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
1011 {
1012 struct dm_verity *v = ti->private;
1013
1014 if (limits->logical_block_size < 1 << v->data_dev_block_bits)
1015 limits->logical_block_size = 1 << v->data_dev_block_bits;
1016
1017 if (limits->physical_block_size < 1 << v->data_dev_block_bits)
1018 limits->physical_block_size = 1 << v->data_dev_block_bits;
1019
1020 limits->io_min = limits->logical_block_size;
1021
1022 /*
1023 * Similar to what dm-crypt does, opt dm-verity out of support for
1024 * direct I/O that is aligned to less than the traditional direct I/O
1025 * alignment requirement of logical_block_size. This prevents dm-verity
1026 * data blocks from crossing pages, eliminating various edge cases.
1027 */
1028 limits->dma_alignment = limits->logical_block_size - 1;
1029 }
1030
1031 #ifdef CONFIG_SECURITY
1032
/*
 * Keep a copy of the root-hash signature for the security subsystem.
 * Returns 0 on success, -ENOMEM if the copy cannot be allocated.
 */
static int verity_init_sig(struct dm_verity *v, const void *sig,
			   size_t sig_size)
{
	v->sig_size = sig_size;

	if (sig) {
		v->root_digest_sig = kmemdup(sig, v->sig_size, GFP_KERNEL);
		if (!v->root_digest_sig)
			return -ENOMEM;
	}

	return 0;
}
1046
/* Free the signature copy made by verity_init_sig() (kfree(NULL) is a no-op). */
static void verity_free_sig(struct dm_verity *v)
{
	kfree(v->root_digest_sig);
}
1051
1052 #else
1053
/* !CONFIG_SECURITY stub: no LSM consumers, so nothing to copy. */
static inline int verity_init_sig(struct dm_verity *v, const void *sig,
				  size_t sig_size)
{
	return 0;
}
1059
/* !CONFIG_SECURITY stub: verity_init_sig() allocated nothing. */
static inline void verity_free_sig(struct dm_verity *v)
{
}
1063
1064 #endif /* CONFIG_SECURITY */
1065
/*
 * Destructor: release everything verity_ctr() set up.  This is also run on
 * the constructor's error path, so every resource is checked for having
 * been created before it is torn down.  The workqueue is destroyed first
 * so no verification work is in flight while its state is freed.
 */
static void verity_dtr(struct dm_target *ti)
{
	struct dm_verity *v = ti->private;

	if (v->verify_wq)
		destroy_workqueue(v->verify_wq);

	mempool_exit(&v->recheck_pool);
	if (v->io)
		dm_io_client_destroy(v->io);

	if (v->bufio)
		dm_bufio_client_destroy(v->bufio);

	kvfree(v->validated_blocks);
	kfree(v->salt);
	kfree(v->initial_hashstate.shash);
	kfree(v->root_digest);
	kfree(v->zero_digest);
	verity_free_sig(v);

	crypto_free_shash(v->shash_tfm);

	kfree(v->alg_name);

	if (v->hash_dev)
		dm_put_device(ti, v->hash_dev);

	if (v->data_dev)
		dm_put_device(ti, v->data_dev);

	verity_fec_dtr(v);

	kfree(v->signature_key_desc);

	/* Balance the static_branch_inc() done while parsing the option. */
	if (v->use_bh_wq)
		static_branch_dec(&use_bh_wq_enabled);

	kfree(v);

	dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
}
1108
verity_alloc_most_once(struct dm_verity * v)1109 static int verity_alloc_most_once(struct dm_verity *v)
1110 {
1111 struct dm_target *ti = v->ti;
1112
1113 if (v->validated_blocks)
1114 return 0;
1115
1116 /* the bitset can only handle INT_MAX blocks */
1117 if (v->data_blocks > INT_MAX) {
1118 ti->error = "device too large to use check_at_most_once";
1119 return -E2BIG;
1120 }
1121
1122 v->validated_blocks = kvcalloc(BITS_TO_LONGS(v->data_blocks),
1123 sizeof(unsigned long),
1124 GFP_KERNEL);
1125 if (!v->validated_blocks) {
1126 ti->error = "failed to allocate bitset for check_at_most_once";
1127 return -ENOMEM;
1128 }
1129
1130 return 0;
1131 }
1132
verity_alloc_zero_digest(struct dm_verity * v)1133 static int verity_alloc_zero_digest(struct dm_verity *v)
1134 {
1135 int r = -ENOMEM;
1136 struct dm_verity_io *io;
1137 u8 *zero_data;
1138
1139 if (v->zero_digest)
1140 return 0;
1141
1142 v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL);
1143
1144 if (!v->zero_digest)
1145 return r;
1146
1147 io = kmalloc(v->ti->per_io_data_size, GFP_KERNEL);
1148
1149 if (!io)
1150 return r; /* verity_dtr will free zero_digest */
1151
1152 zero_data = kzalloc(1 << v->data_dev_block_bits, GFP_KERNEL);
1153
1154 if (!zero_data)
1155 goto out;
1156
1157 r = verity_hash(v, io, zero_data, 1 << v->data_dev_block_bits,
1158 v->zero_digest);
1159
1160 out:
1161 kfree(io);
1162 kfree(zero_data);
1163
1164 return r;
1165 }
1166
verity_is_verity_mode(const char * arg_name)1167 static inline bool verity_is_verity_mode(const char *arg_name)
1168 {
1169 return (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING) ||
1170 !strcasecmp(arg_name, DM_VERITY_OPT_RESTART) ||
1171 !strcasecmp(arg_name, DM_VERITY_OPT_PANIC));
1172 }
1173
verity_parse_verity_mode(struct dm_verity * v,const char * arg_name)1174 static int verity_parse_verity_mode(struct dm_verity *v, const char *arg_name)
1175 {
1176 if (v->mode)
1177 return -EINVAL;
1178
1179 if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING))
1180 v->mode = DM_VERITY_MODE_LOGGING;
1181 else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART))
1182 v->mode = DM_VERITY_MODE_RESTART;
1183 else if (!strcasecmp(arg_name, DM_VERITY_OPT_PANIC))
1184 v->mode = DM_VERITY_MODE_PANIC;
1185
1186 return 0;
1187 }
1188
verity_is_verity_error_mode(const char * arg_name)1189 static inline bool verity_is_verity_error_mode(const char *arg_name)
1190 {
1191 return (!strcasecmp(arg_name, DM_VERITY_OPT_ERROR_RESTART) ||
1192 !strcasecmp(arg_name, DM_VERITY_OPT_ERROR_PANIC));
1193 }
1194
verity_parse_verity_error_mode(struct dm_verity * v,const char * arg_name)1195 static int verity_parse_verity_error_mode(struct dm_verity *v, const char *arg_name)
1196 {
1197 if (v->error_mode)
1198 return -EINVAL;
1199
1200 if (!strcasecmp(arg_name, DM_VERITY_OPT_ERROR_RESTART))
1201 v->error_mode = DM_VERITY_MODE_RESTART;
1202 else if (!strcasecmp(arg_name, DM_VERITY_OPT_ERROR_PANIC))
1203 v->error_mode = DM_VERITY_MODE_PANIC;
1204
1205 return 0;
1206 }
1207
/*
 * Parse the optional feature arguments following the ten mandatory ones.
 *
 * Called twice from verity_ctr(): first with @only_modifier_opts=true,
 * before the mandatory args are parsed, to pick up options (such as
 * "try_verify_in_tasklet") that modify how the rest of the table is
 * handled; then with @only_modifier_opts=false to fully parse everything.
 * In the first pass unrecognized options are ignored rather than rejected,
 * since they may be extra arguments of options whose parsing was skipped.
 *
 * Returns 0 on success or a negative errno, with ti->error set.
 */
static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
				 struct dm_verity_sig_opts *verify_args,
				 bool only_modifier_opts)
{
	int r = 0;
	unsigned int argc;
	struct dm_target *ti = v->ti;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"},
	};

	/* First token is the count of feature args that follow. */
	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (verity_is_verity_mode(arg_name)) {
			if (only_modifier_opts)
				continue;
			r = verity_parse_verity_mode(v, arg_name);
			if (r) {
				ti->error = "Conflicting error handling parameters";
				return r;
			}
			continue;

		} else if (verity_is_verity_error_mode(arg_name)) {
			if (only_modifier_opts)
				continue;
			r = verity_parse_verity_error_mode(v, arg_name);
			if (r) {
				ti->error = "Conflicting error handling parameters";
				return r;
			}
			continue;

		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
			if (only_modifier_opts)
				continue;
			r = verity_alloc_zero_digest(v);
			if (r) {
				ti->error = "Cannot allocate zero digest";
				return r;
			}
			continue;

		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_AT_MOST_ONCE)) {
			if (only_modifier_opts)
				continue;
			r = verity_alloc_most_once(v);
			if (r)
				return r;
			continue;

		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_TASKLET_VERIFY)) {
			/* Handled in both passes; dtr balances the inc. */
			v->use_bh_wq = true;
			static_branch_inc(&use_bh_wq_enabled);
			continue;

		} else if (verity_is_fec_opt_arg(arg_name)) {
			if (only_modifier_opts)
				continue;
			/* FEC options consume extra args; argc is updated. */
			r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
			if (r)
				return r;
			continue;

		} else if (verity_verify_is_sig_opt_arg(arg_name)) {
			if (only_modifier_opts)
				continue;
			/* Signature options also consume extra args. */
			r = verity_verify_sig_parse_opt_args(as, v,
							     verify_args,
							     &argc, arg_name);
			if (r)
				return r;
			continue;

		} else if (only_modifier_opts) {
			/*
			 * Ignore unrecognized opt, could easily be an extra
			 * argument to an option whose parsing was skipped.
			 * Normal parsing (@only_modifier_opts=false) will
			 * properly parse all options (and their extra args).
			 */
			continue;
		}

		DMERR("Unrecognized verity feature request: %s", arg_name);
		ti->error = "Unrecognized verity feature request";
		return -EINVAL;
	} while (argc && !r);

	return r;
}
1310
/*
 * Set up the hash algorithm named in the table line: allocate the shash
 * transform, record the digest size, and size ti->per_io_data_size for the
 * per-bio hashing context.  For version>=1 tables using sha256, the SHA-256
 * library API is used instead of the generic crypto API.
 *
 * Allocated resources (alg_name, shash_tfm) are freed by verity_dtr() on
 * the constructor's error path.  Returns 0 or a negative errno with
 * ti->error set.
 */
static int verity_setup_hash_alg(struct dm_verity *v, const char *alg_name)
{
	struct dm_target *ti = v->ti;
	struct crypto_shash *shash;

	v->alg_name = kstrdup(alg_name, GFP_KERNEL);
	if (!v->alg_name) {
		ti->error = "Cannot allocate algorithm name";
		return -ENOMEM;
	}

	shash = crypto_alloc_shash(alg_name, 0, 0);
	if (IS_ERR(shash)) {
		ti->error = "Cannot initialize hash function";
		return PTR_ERR(shash);
	}
	v->shash_tfm = shash;
	v->digest_size = crypto_shash_digestsize(shash);
	/* A hash block must hold at least two digests to form a tree. */
	if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
		ti->error = "Digest size too big";
		return -EINVAL;
	}
	if (likely(v->version && strcmp(alg_name, "sha256") == 0)) {
		/*
		 * Fast path: use the library API for reduced overhead and
		 * interleaved hashing support.
		 */
		v->use_sha256_lib = true;
		if (sha256_finup_2x_is_optimized())
			v->use_sha256_finup_2x = true;
		ti->per_io_data_size =
			offsetofend(struct dm_verity_io, hash_ctx.sha256);
	} else {
		/* Fallback case: use the generic crypto API. */
		ti->per_io_data_size =
			offsetofend(struct dm_verity_io, hash_ctx.shash) +
			crypto_shash_descsize(shash);
	}
	return 0;
}
1351
/*
 * Parse the salt argument (hex string, or "-" for no salt) and, for
 * version-1 formats where the salt is hashed first, precompute the
 * pre-salted hash state so per-block hashing can start from it instead of
 * re-hashing the salt every time.
 *
 * Allocations are freed by verity_dtr() on the error path.  Returns 0 or
 * a negative errno with ti->error set.
 */
static int verity_setup_salt_and_hashstate(struct dm_verity *v, const char *arg)
{
	struct dm_target *ti = v->ti;

	if (strcmp(arg, "-") != 0) {
		v->salt_size = strlen(arg) / 2;
		v->salt = kmalloc(v->salt_size, GFP_KERNEL);
		if (!v->salt) {
			ti->error = "Cannot allocate salt";
			return -ENOMEM;
		}
		/* Reject odd-length or non-hex salt strings. */
		if (strlen(arg) != v->salt_size * 2 ||
		    hex2bin(v->salt, arg, v->salt_size)) {
			ti->error = "Invalid salt";
			return -EINVAL;
		}
	}
	if (likely(v->use_sha256_lib)) {
		/* Implies version 1: salt at beginning */
		v->initial_hashstate.sha256 =
			kmalloc_obj(struct sha256_ctx);
		if (!v->initial_hashstate.sha256) {
			ti->error = "Cannot allocate initial hash state";
			return -ENOMEM;
		}
		sha256_init(v->initial_hashstate.sha256);
		sha256_update(v->initial_hashstate.sha256,
			      v->salt, v->salt_size);
	} else if (v->version) { /* Version 1: salt at beginning */
		SHASH_DESC_ON_STACK(desc, v->shash_tfm);
		int r;

		/*
		 * Compute the pre-salted hash state that can be passed to
		 * crypto_shash_import() for each block later.
		 */
		v->initial_hashstate.shash = kmalloc(
			crypto_shash_statesize(v->shash_tfm), GFP_KERNEL);
		if (!v->initial_hashstate.shash) {
			ti->error = "Cannot allocate initial hash state";
			return -ENOMEM;
		}
		desc->tfm = v->shash_tfm;
		r = crypto_shash_init(desc) ?:
		    crypto_shash_update(desc, v->salt, v->salt_size) ?:
		    crypto_shash_export(desc, v->initial_hashstate.shash);
		if (r) {
			ti->error = "Cannot set up initial hash state";
			return r;
		}
	}
	return 0;
}
1405
1406 /*
1407 * Target parameters:
1408 * <version> The current format is version 1.
1409 * Vsn 0 is compatible with original Chromium OS releases.
1410 * <data device>
1411 * <hash device>
1412 * <data block size>
1413 * <hash block size>
1414 * <the number of data blocks>
1415 * <hash start block>
1416 * <algorithm>
1417 * <digest>
1418 * <salt> Hex string or "-" if no salt.
1419 */
/*
 * Constructor: parse the table line (see the parameter list above),
 * validate the geometry of the hash tree, and allocate everything the
 * target needs.  On any failure, verity_dtr() is invoked to unwind the
 * partially-constructed state; each field it frees is checked for NULL,
 * so bailing out at any point here is safe.
 */
static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_verity *v;
	struct dm_verity_sig_opts verify_args = {0};
	struct dm_arg_set as;
	unsigned int num;
	unsigned long long num_ll;
	int r;
	int i;
	sector_t hash_position;
	char dummy;
	char *root_hash_digest_to_validate;

	v = kzalloc_obj(struct dm_verity);
	if (!v) {
		ti->error = "Cannot allocate verity structure";
		return -ENOMEM;
	}
	ti->private = v;
	v->ti = ti;

	r = verity_fec_ctr_alloc(v);
	if (r)
		goto bad;

	/* verity is integrity protection, so the table must be read-only. */
	if ((dm_table_get_mode(ti->table) & ~BLK_OPEN_READ)) {
		ti->error = "Device must be readonly";
		r = -EINVAL;
		goto bad;
	}

	if (argc < 10) {
		ti->error = "Not enough arguments";
		r = -EINVAL;
		goto bad;
	}

	/* Parse optional parameters that modify primary args */
	if (argc > 10) {
		as.argc = argc - 10;
		as.argv = argv + 10;
		r = verity_parse_opt_args(&as, v, &verify_args, true);
		if (r < 0)
			goto bad;
	}

	/* argv[0]: on-disk format version (0 or 1). */
	if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
	    num > 1) {
		ti->error = "Invalid version";
		r = -EINVAL;
		goto bad;
	}
	v->version = num;

	r = dm_get_device(ti, argv[1], BLK_OPEN_READ, &v->data_dev);
	if (r) {
		ti->error = "Data device lookup failed";
		goto bad;
	}

	r = dm_get_device(ti, argv[2], BLK_OPEN_READ, &v->hash_dev);
	if (r) {
		ti->error = "Hash device lookup failed";
		goto bad;
	}

	/* argv[3]: data block size — a power of two within device limits. */
	if (sscanf(argv[3], "%u%c", &num, &dummy) != 1 ||
	    !num || (num & (num - 1)) ||
	    num < bdev_logical_block_size(v->data_dev->bdev) ||
	    num > PAGE_SIZE) {
		ti->error = "Invalid data device block size";
		r = -EINVAL;
		goto bad;
	}
	v->data_dev_block_bits = __ffs(num);

	/* argv[4]: hash block size — power of two, may exceed PAGE_SIZE. */
	if (sscanf(argv[4], "%u%c", &num, &dummy) != 1 ||
	    !num || (num & (num - 1)) ||
	    num < bdev_logical_block_size(v->hash_dev->bdev) ||
	    num > INT_MAX) {
		ti->error = "Invalid hash device block size";
		r = -EINVAL;
		goto bad;
	}
	v->hash_dev_block_bits = __ffs(num);

	/* argv[5]: data block count; shift round-trip rejects overflow. */
	if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
	    (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
	    >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
		ti->error = "Invalid data blocks";
		r = -EINVAL;
		goto bad;
	}
	v->data_blocks = num_ll;

	if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {
		ti->error = "Data device is too small";
		r = -EINVAL;
		goto bad;
	}

	/* argv[6]: first hash block on the hash device. */
	if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
	    (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
	    >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
		ti->error = "Invalid hash start";
		r = -EINVAL;
		goto bad;
	}
	v->hash_start = num_ll;

	/* argv[7]: hash algorithm name. */
	r = verity_setup_hash_alg(v, argv[7]);
	if (r)
		goto bad;

	/* argv[8]: root digest as a hex string. */
	v->root_digest = kmalloc(v->digest_size, GFP_KERNEL);
	if (!v->root_digest) {
		ti->error = "Cannot allocate root digest";
		r = -ENOMEM;
		goto bad;
	}
	if (strlen(argv[8]) != v->digest_size * 2 ||
	    hex2bin(v->root_digest, argv[8], v->digest_size)) {
		ti->error = "Invalid root digest";
		r = -EINVAL;
		goto bad;
	}
	root_hash_digest_to_validate = argv[8];

	/* argv[9]: salt ("-" means none). */
	r = verity_setup_salt_and_hashstate(v, argv[9]);
	if (r)
		goto bad;

	argv += 10;
	argc -= 10;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;
		r = verity_parse_opt_args(&as, v, &verify_args, false);
		if (r < 0)
			goto bad;
	}

	/* Root hash signature is an optional parameter */
	r = verity_verify_root_hash(root_hash_digest_to_validate,
				    strlen(root_hash_digest_to_validate),
				    verify_args.sig,
				    verify_args.sig_size);
	if (r < 0) {
		ti->error = "Root hash verification failed";
		goto bad;
	}

	r = verity_init_sig(v, verify_args.sig, verify_args.sig_size);
	if (r < 0) {
		ti->error = "Cannot allocate root digest signature";
		goto bad;
	}

	/* Digests packed per hash block, rounded down to a power of two. */
	v->hash_per_block_bits =
		__fls((1 << v->hash_dev_block_bits) / v->digest_size);

	/* Number of tree levels needed to cover all data blocks. */
	v->levels = 0;
	if (v->data_blocks)
		while (v->hash_per_block_bits * v->levels < 64 &&
		       (unsigned long long)(v->data_blocks - 1) >>
		       (v->hash_per_block_bits * v->levels))
			v->levels++;

	if (v->levels > DM_VERITY_MAX_LEVELS) {
		ti->error = "Too many tree levels";
		r = -E2BIG;
		goto bad;
	}

	/*
	 * Lay out the tree top-down on disk: compute the starting block of
	 * each level, checking for sector_t overflow as we go.
	 */
	hash_position = v->hash_start;
	for (i = v->levels - 1; i >= 0; i--) {
		sector_t s;

		v->hash_level_block[i] = hash_position;
		s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
			>> ((i + 1) * v->hash_per_block_bits);
		if (hash_position + s < hash_position) {
			ti->error = "Hash device offset overflow";
			r = -E2BIG;
			goto bad;
		}
		hash_position += s;
	}
	v->hash_blocks = hash_position;

	r = mempool_init_page_pool(&v->recheck_pool, 1, 0);
	if (unlikely(r)) {
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

	v->io = dm_io_client_create();
	if (IS_ERR(v->io)) {
		r = PTR_ERR(v->io);
		/* NULL the field so verity_dtr() does not destroy ERR_PTR. */
		v->io = NULL;
		ti->error = "Cannot allocate dm io";
		goto bad;
	}

	v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
		1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
		dm_bufio_alloc_callback, NULL,
		v->use_bh_wq ? DM_BUFIO_CLIENT_NO_SLEEP : 0);
	if (IS_ERR(v->bufio)) {
		ti->error = "Cannot initialize dm-bufio";
		r = PTR_ERR(v->bufio);
		v->bufio = NULL;
		goto bad;
	}

	if (dm_bufio_get_device_size(v->bufio) < v->hash_blocks) {
		ti->error = "Hash device is too small";
		r = -E2BIG;
		goto bad;
	}

	/*
	 * Using WQ_HIGHPRI improves throughput and completion latency by
	 * reducing wait times when reading from a dm-verity device.
	 *
	 * Also as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI
	 * allows verify_wq to preempt softirq since verification in softirq
	 * will fall-back to using it for error handling (or if the bufio cache
	 * doesn't have required hashes).
	 */
	v->verify_wq = alloc_workqueue("kverityd",
				       WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_PERCPU,
				       0);
	if (!v->verify_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	r = verity_fec_ctr(v);
	if (r)
		goto bad;

	ti->per_io_data_size = roundup(ti->per_io_data_size,
				       __alignof__(struct dm_verity_io));

	verity_verify_sig_opts_cleanup(&verify_args);

	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);

	return 0;

bad:

	verity_verify_sig_opts_cleanup(&verify_args);
	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
	verity_dtr(ti);

	return r;
}
1682
1683 /*
1684 * Get the verity mode (error behavior) of a verity target.
1685 *
1686 * Returns the verity mode of the target, or -EINVAL if 'ti' is not a verity
1687 * target.
1688 */
dm_verity_get_mode(struct dm_target * ti)1689 int dm_verity_get_mode(struct dm_target *ti)
1690 {
1691 struct dm_verity *v = ti->private;
1692
1693 if (!dm_is_verity_target(ti))
1694 return -EINVAL;
1695
1696 return v->mode;
1697 }
1698
1699 /*
1700 * Get the root digest of a verity target.
1701 *
1702 * Returns a copy of the root digest, the caller is responsible for
1703 * freeing the memory of the digest.
1704 */
/*
 * Get the root digest of a verity target.
 *
 * Returns a freshly allocated copy of the root digest in *root_digest and
 * its length in *digest_size; the caller must kfree() the copy.  Fails with
 * -EINVAL for non-verity targets and -ENOMEM on allocation failure.
 */
int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned int *digest_size)
{
	struct dm_verity *v = ti->private;

	if (!dm_is_verity_target(ti))
		return -EINVAL;

	*root_digest = kmemdup(v->root_digest, v->digest_size, GFP_KERNEL);
	if (!*root_digest)
		return -ENOMEM;

	*digest_size = v->digest_size;
	return 0;
}
1720
1721 #ifdef CONFIG_SECURITY
1722
1723 #ifdef CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG
1724
/*
 * Hand the root digest signature (or its absence) to the LSM layer for
 * the given block device.  Returns the LSM hook's result.
 */
static int verity_security_set_signature(struct block_device *bdev,
					 struct dm_verity *v)
{
	/*
	 * if the dm-verity target is unsigned, v->root_digest_sig will
	 * be NULL, and the hook call is still required to let LSMs mark
	 * the device as unsigned. This information is crucial for LSMs to
	 * block operations such as execution on unsigned files
	 */
	return security_bdev_setintegrity(bdev,
					  LSM_INT_DMVERITY_SIG_VALID,
					  v->root_digest_sig,
					  v->sig_size);
}
1739
1740 #else
1741
/* !CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG stub: no signature to expose. */
static inline int verity_security_set_signature(struct block_device *bdev,
						struct dm_verity *v)
{
	return 0;
}
1747
1748 #endif /* CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG */
1749
1750 /*
1751 * Expose verity target's root hash and signature data to LSMs before resume.
1752 *
1753 * Returns 0 on success, or -ENOMEM if the system is out of memory.
1754 */
verity_preresume(struct dm_target * ti)1755 static int verity_preresume(struct dm_target *ti)
1756 {
1757 struct block_device *bdev;
1758 struct dm_verity_digest root_digest;
1759 struct dm_verity *v;
1760 int r;
1761
1762 v = ti->private;
1763 bdev = dm_disk(dm_table_get_md(ti->table))->part0;
1764 root_digest.digest = v->root_digest;
1765 root_digest.digest_len = v->digest_size;
1766 root_digest.alg = crypto_shash_alg_name(v->shash_tfm);
1767
1768 r = security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, &root_digest,
1769 sizeof(root_digest));
1770 if (r)
1771 return r;
1772
1773 r = verity_security_set_signature(bdev, v);
1774 if (r)
1775 goto bad;
1776
1777 return 0;
1778
1779 bad:
1780
1781 security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, NULL, 0);
1782
1783 return r;
1784 }
1785
1786 #endif /* CONFIG_SECURITY */
1787
/* dm core callback table for the "verity" target type. */
static struct target_type verity_target = {
	.name		= "verity",
	/* Note: the LSMs depend on the singleton and immutable features */
	.features	= DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
	.version	= {1, 13, 0},
	.module		= THIS_MODULE,
	.ctr		= verity_ctr,
	.dtr		= verity_dtr,
	.map		= verity_map,
	.postsuspend	= verity_postsuspend,
	.status		= verity_status,
	.prepare_ioctl	= verity_prepare_ioctl,
	.iterate_devices = verity_iterate_devices,
	.io_hints	= verity_io_hints,
#ifdef CONFIG_SECURITY
	.preresume	= verity_preresume,
#endif /* CONFIG_SECURITY */
};
1806
dm_verity_init(void)1807 static int __init dm_verity_init(void)
1808 {
1809 int r;
1810
1811 r = dm_verity_verify_sig_init();
1812 if (r)
1813 return r;
1814
1815 r = dm_register_target(&verity_target);
1816 if (r) {
1817 dm_verity_verify_sig_exit();
1818 return r;
1819 }
1820
1821 return 0;
1822 }
1823 module_init(dm_verity_init);
1824
/* Module exit: unregister the target, then tear down signature support. */
static void __exit dm_verity_exit(void)
{
	dm_unregister_target(&verity_target);
	dm_verity_verify_sig_exit();
}
1830 module_exit(dm_verity_exit);
1831
1832 /*
1833 * Check whether a DM target is a verity target.
1834 */
/*
 * Check whether a DM target is a verity target, by comparing its type
 * against this module's target_type.
 */
bool dm_is_verity_target(struct dm_target *ti)
{
	return ti->type == &verity_target;
}
1839
1840 MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
1841 MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
1842 MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
1843 MODULE_DESCRIPTION(DM_NAME " target for transparent disk integrity checking");
1844 MODULE_LICENSE("GPL");
1845