// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2016 by Delphix. All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zil.h>
#include <sys/abd.h>
#include <zfs_fletcher.h>

/*
 * Checksum vectors.
 *
 * In the SPA, everything is checksummed. We support checksum vectors
 * for three distinct reasons:
 *
 *   1. Different kinds of data need different levels of protection.
 *      For SPA metadata, we always want a very strong checksum.
 *      For user data, we let users make the trade-off between speed
 *      and checksum strength.
 *
 *   2. Cryptographic hash and MAC algorithms are an area of active research.
 *      It is likely that future hash functions will be at least as strong
 *      as the current best-of-breed, and may be substantially faster as well.
 *      We want the ability to take advantage of these new hashes as soon as
 *      they become available.
 *
 *   3. If someone develops hardware that can compute a strong hash quickly,
 *      we want the ability to take advantage of that hardware.
 *
 * Of course, we don't want a checksum upgrade to invalidate existing
 * data, so we store the checksum *function* in eight bits of the bp.
 * This gives us room for up to 256 different checksum functions.
 *
 * When writing a block, we always checksum it with the latest-and-greatest
 * checksum function of the appropriate strength. When reading a block,
 * we compare the expected checksum against the actual checksum, which we
 * compute via the checksum function specified by BP_GET_CHECKSUM(bp).
 *
 * SALTED CHECKSUMS
 *
 * To enable the use of less secure hash algorithms with dedup, we
 * introduce the notion of salted checksums (MACs, really). A salted
 * checksum is fed both a random 256-bit value (the salt) and the data
 * to be checksummed. This salt is kept secret (stored on the pool, but
 * never shown to the user). Thus even if an attacker knew of collision
 * weaknesses in the hash algorithm, they would not be able to mount a
 * known-plaintext attack on the DDT, since the actual hash value could
 * not be known ahead of time. How the salt is used is algorithm-specific
 * (some might simply prefix it to the data block, others might need to
 * utilize a full-blown HMAC). On disk the salt is stored in a ZAP
 * object in the MOS (DMU_POOL_CHECKSUM_SALT).
 *
 * CONTEXT TEMPLATES
 *
 * Some hashing algorithms need to perform a substantial amount of
 * initialization work (e.g. salted checksums above may need to pre-hash
 * the salt) before being able to process data. Performing this
 * redundant work for each block would be wasteful, so we instead allow
 * a checksum algorithm to do the work once (the first time it's used)
 * and then keep this pre-initialized context as a template inside the
 * spa_t (spa_cksum_tmpls). If the zio_checksum_info_t contains
 * non-NULL ci_tmpl_init and ci_tmpl_free callbacks, they are used to
 * construct and destruct the pre-initialized checksum context. The
 * pre-initialized context is then reused during each checksum
 * invocation and passed to the checksum function.
 */

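/*
 * Checksum vector for the "off" (and "noparity") settings: produce an
 * all-zero checksum without touching the data.
 */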
static void
abd_checksum_off(abd_t *abd, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	(void) abd, (void) size, (void) ctx_template;
	ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
}

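/*
 * Fletcher-2 checksum vectors, computed incrementally over the ABD in
 * native and byteswapped byte order. No context template is needed.
 */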
static void
abd_fletcher_2_native(abd_t *abd, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	(void) ctx_template;
	fletcher_init(zcp);
	(void) abd_iterate_func(abd, 0, size,
	    fletcher_2_incremental_native, zcp);
}

static void
abd_fletcher_2_byteswap(abd_t *abd, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	(void) ctx_template;
	fletcher_init(zcp);
	(void) abd_iterate_func(abd, 0, size,
	    fletcher_2_incremental_byteswap, zcp);
}

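/*
 * Common Fletcher-4 worker: initialize the selected Fletcher-4
 * implementation's context, iterate over the ABD, and finalize the
 * result into the caller-supplied acd_zcp.
 */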
static inline void
abd_fletcher_4_impl(abd_t *abd, uint64_t size, zio_abd_checksum_data_t *acdp)
{
	fletcher_4_abd_ops.acf_init(acdp);
	abd_iterate_func(abd, 0, size, fletcher_4_abd_ops.acf_iter, acdp);
	fletcher_4_abd_ops.acf_fini(acdp);
}

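/*
 * Fletcher-4 ABD checksum entry points for native and byteswapped byte
 * order; ctx_template is unused since Fletcher-4 needs no pre-initialized
 * context.
 */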
void
abd_fletcher_4_native(abd_t *abd, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	(void) ctx_template;
	fletcher_4_ctx_t ctx;

	zio_abd_checksum_data_t acd = {
		.acd_byteorder = ZIO_CHECKSUM_NATIVE,
		.acd_zcp = zcp,
		.acd_ctx = &ctx
	};

	abd_fletcher_4_impl(abd, size, &acd);
}

void
abd_fletcher_4_byteswap(abd_t *abd, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	(void) ctx_template;
	fletcher_4_ctx_t ctx;

	zio_abd_checksum_data_t acd = {
		.acd_byteorder = ZIO_CHECKSUM_BYTESWAP,
		.acd_zcp = zcp,
		.acd_ctx = &ctx
	};

	abd_fletcher_4_impl(abd, size, &acd);
}

/*
 * Checksum vectors.
 *
 * Note: you cannot change the name string for these functions, as they are
 * embedded in on-disk data in some places (e.g. dedup table names).
 */
zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
	{{NULL, NULL}, NULL, NULL, 0, "inherit"},
	{{NULL, NULL}, NULL, NULL, 0, "on"},
	{{abd_checksum_off, abd_checksum_off},
	    NULL, NULL, 0, "off"},
	{{abd_checksum_sha256, abd_checksum_sha256},
	    NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_EMBEDDED,
	    "label"},
	{{abd_checksum_sha256, abd_checksum_sha256},
	    NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_EMBEDDED,
	    "gang_header"},
	{{abd_fletcher_2_native, abd_fletcher_2_byteswap},
	    NULL, NULL, ZCHECKSUM_FLAG_EMBEDDED, "zilog"},
	{{abd_fletcher_2_native, abd_fletcher_2_byteswap},
	    NULL, NULL, 0, "fletcher2"},
	{{abd_fletcher_4_native, abd_fletcher_4_byteswap},
	    NULL, NULL, ZCHECKSUM_FLAG_METADATA, "fletcher4"},
	{{abd_checksum_sha256, abd_checksum_sha256},
	    NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
	    ZCHECKSUM_FLAG_NOPWRITE, "sha256"},
	{{abd_fletcher_4_native, abd_fletcher_4_byteswap},
	    NULL, NULL, ZCHECKSUM_FLAG_EMBEDDED, "zilog2"},
	{{abd_checksum_off, abd_checksum_off},
	    NULL, NULL, 0, "noparity"},
	{{abd_checksum_sha512_native, abd_checksum_sha512_byteswap},
	    NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
	    ZCHECKSUM_FLAG_NOPWRITE, "sha512"},
	{{abd_checksum_skein_native, abd_checksum_skein_byteswap},
	    abd_checksum_skein_tmpl_init, abd_checksum_skein_tmpl_free,
	    ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
	    ZCHECKSUM_FLAG_SALTED | ZCHECKSUM_FLAG_NOPWRITE, "skein"},
	{{abd_checksum_edonr_native, abd_checksum_edonr_byteswap},
	    abd_checksum_edonr_tmpl_init, abd_checksum_edonr_tmpl_free,
	    ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_SALTED |
	    ZCHECKSUM_FLAG_NOPWRITE, "edonr"},
	{{abd_checksum_blake3_native, abd_checksum_blake3_byteswap},
	    abd_checksum_blake3_tmpl_init, abd_checksum_blake3_tmpl_free,
	    ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
	    ZCHECKSUM_FLAG_SALTED | ZCHECKSUM_FLAG_NOPWRITE, "blake3"},
};

/*
 * The flag corresponding to the "verify" in dedup=[checksum,]verify
 * must be cleared first, so callers should use ZIO_CHECKSUM_MASK.
 */
spa_feature_t
zio_checksum_to_feature(enum zio_checksum cksum)
{
	VERIFY((cksum & ~ZIO_CHECKSUM_MASK) == 0);

	switch (cksum) {
	case ZIO_CHECKSUM_BLAKE3:
		return (SPA_FEATURE_BLAKE3);
	case ZIO_CHECKSUM_SHA512:
		return (SPA_FEATURE_SHA512);
	case ZIO_CHECKSUM_SKEIN:
		return (SPA_FEATURE_SKEIN);
	case ZIO_CHECKSUM_EDONR:
		return (SPA_FEATURE_EDONR);
	default:
		return (SPA_FEATURE_NONE);
	}
}

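/*
 * Resolve the effective checksum for a dataset: "inherit" takes the
 * parent's (already resolved) value and "on" maps to ZIO_CHECKSUM_ON_VALUE
 * (fletcher4 at present); anything else is used as-is.
 */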
enum zio_checksum
zio_checksum_select(enum zio_checksum child, enum zio_checksum parent)
{
	ASSERT(child < ZIO_CHECKSUM_FUNCTIONS);
	ASSERT(parent < ZIO_CHECKSUM_FUNCTIONS);
	ASSERT(parent != ZIO_CHECKSUM_INHERIT && parent != ZIO_CHECKSUM_ON);

	if (child == ZIO_CHECKSUM_INHERIT)
		return (parent);

	if (child == ZIO_CHECKSUM_ON)
		return (ZIO_CHECKSUM_ON_VALUE);

	return (child);
}

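/*
 * Like zio_checksum_select(), but for the dedup checksum: "on" resolves to
 * the pool's dedup checksum (spa_dedup_checksum()), and the verify flag is
 * carried through.
 */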
enum zio_checksum
zio_checksum_dedup_select(spa_t *spa, enum zio_checksum child,
    enum zio_checksum parent)
{
	ASSERT((child & ZIO_CHECKSUM_MASK) < ZIO_CHECKSUM_FUNCTIONS);
	ASSERT((parent & ZIO_CHECKSUM_MASK) < ZIO_CHECKSUM_FUNCTIONS);
	ASSERT(parent != ZIO_CHECKSUM_INHERIT && parent != ZIO_CHECKSUM_ON);

	if (child == ZIO_CHECKSUM_INHERIT)
		return (parent);

	if (child == ZIO_CHECKSUM_ON)
		return (spa_dedup_checksum(spa));

	if (child == (ZIO_CHECKSUM_ON | ZIO_CHECKSUM_VERIFY))
		return (spa_dedup_checksum(spa) | ZIO_CHECKSUM_VERIFY);

	ASSERT((zio_checksum_table[child & ZIO_CHECKSUM_MASK].ci_flags &
	    ZCHECKSUM_FLAG_DEDUP) ||
	    (child & ZIO_CHECKSUM_VERIFY) || child == ZIO_CHECKSUM_OFF);

	return (child);
}

/*
 * Set the external verifier for a gang block based on <vdev, offset, txg>,
 * a tuple which is guaranteed to be unique for the life of the pool.
 */
static void
zio_checksum_gang_verifier(zio_cksum_t *zcp, const blkptr_t *bp)
{
	const dva_t *dva = BP_IDENTITY(bp);
	uint64_t txg = BP_GET_BIRTH(bp);

	ASSERT(BP_IS_GANG(bp));

	ZIO_SET_CHECKSUM(zcp, DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva), txg, 0);
}

/*
 * Set the external verifier for a label block based on its offset.
 * The vdev is implicit, and the txg is unknowable at pool open time --
 * hence the logic in vdev_uberblock_load() to find the most recent copy.
 */
static void
zio_checksum_label_verifier(zio_cksum_t *zcp, uint64_t offset)
{
	ZIO_SET_CHECKSUM(zcp, offset, 0, 0, 0);
}

/*
 * Calls the template init function of a checksum which supports context
 * templates and installs the template into the spa_t.
 */
static void
zio_checksum_template_init(enum zio_checksum checksum, spa_t *spa)
{
	zio_checksum_info_t *ci = &zio_checksum_table[checksum];

	if (ci->ci_tmpl_init == NULL)
		return;
	if (spa->spa_cksum_tmpls[checksum] != NULL)
		return;

	VERIFY(ci->ci_tmpl_free != NULL);
	mutex_enter(&spa->spa_cksum_tmpls_lock);
	if (spa->spa_cksum_tmpls[checksum] == NULL) {
		spa->spa_cksum_tmpls[checksum] =
		    ci->ci_tmpl_init(&spa->spa_cksum_salt);
		VERIFY(spa->spa_cksum_tmpls[checksum] != NULL);
	}
	mutex_exit(&spa->spa_cksum_tmpls_lock);
}

/* convenience function to update a checksum to accommodate an encryption MAC */
static void
zio_checksum_handle_crypt(zio_cksum_t *cksum, zio_cksum_t *saved, boolean_t xor)
{
	/*
	 * Weak checksums do not have their entropy spread evenly
	 * across the bits of the checksum. Therefore, when truncating
	 * a weak checksum we XOR the first 2 words with the last 2 so
	 * that we don't "lose" any entropy unnecessarily.
	 */
	if (xor) {
		cksum->zc_word[0] ^= cksum->zc_word[2];
		cksum->zc_word[1] ^= cksum->zc_word[3];
	}

	cksum->zc_word[2] = saved->zc_word[2];
	cksum->zc_word[3] = saved->zc_word[3];
}

/*
 * Generate the checksum for a block and store it in the bp (or, for
 * embedded checksums, in the zio_eck_t inside the data itself).
 */
void
zio_checksum_compute(zio_t *zio, enum zio_checksum checksum,
    abd_t *abd, uint64_t size)
{
	static const uint64_t zec_magic = ZEC_MAGIC;
	blkptr_t *bp = zio->io_bp;
	uint64_t offset = zio->io_offset;
	zio_checksum_info_t *ci = &zio_checksum_table[checksum];
	zio_cksum_t cksum, saved;
	spa_t *spa = zio->io_spa;
	boolean_t insecure = (ci->ci_flags & ZCHECKSUM_FLAG_DEDUP) == 0;

	ASSERT((uint_t)checksum < ZIO_CHECKSUM_FUNCTIONS);
	ASSERT(ci->ci_func[0] != NULL);

	zio_checksum_template_init(checksum, spa);

	if (ci->ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
		zio_eck_t eck;
		size_t eck_offset;

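		/*
		 * Embedded checksums (ZIL, gang and label blocks) store a
		 * zio_eck_t inside the data itself: stamp the magic and
		 * verifier into the ABD, checksum the block, then write
		 * the result back into the embedded zio_eck_t.
		 */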
		memset(&saved, 0, sizeof (zio_cksum_t));

		if (checksum == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t zilc;
			abd_copy_to_buf(&zilc, abd, sizeof (zil_chain_t));

			uint64_t nused = P2ROUNDUP_TYPED(zilc.zc_nused,
			    ZIL_MIN_BLKSZ, uint64_t);
			ASSERT3U(size, >=, nused);
			size = nused;
			eck = zilc.zc_eck;
			eck_offset = offsetof(zil_chain_t, zc_eck);
		} else {
			ASSERT3U(size, >=, sizeof (zio_eck_t));
			eck_offset = size - sizeof (zio_eck_t);
			abd_copy_to_buf_off(&eck, abd, eck_offset,
			    sizeof (zio_eck_t));
		}

		if (checksum == ZIO_CHECKSUM_GANG_HEADER) {
			zio_checksum_gang_verifier(&eck.zec_cksum, bp);
		} else if (checksum == ZIO_CHECKSUM_LABEL) {
			zio_checksum_label_verifier(&eck.zec_cksum, offset);
		} else {
			saved = eck.zec_cksum;
			eck.zec_cksum = bp->blk_cksum;
		}

		abd_copy_from_buf_off(abd, &zec_magic,
		    eck_offset + offsetof(zio_eck_t, zec_magic),
		    sizeof (zec_magic));
		abd_copy_from_buf_off(abd, &eck.zec_cksum,
		    eck_offset + offsetof(zio_eck_t, zec_cksum),
		    sizeof (zio_cksum_t));

		ci->ci_func[0](abd, size, spa->spa_cksum_tmpls[checksum],
		    &cksum);
		if (bp != NULL && BP_USES_CRYPT(bp) &&
		    BP_GET_TYPE(bp) != DMU_OT_OBJSET)
			zio_checksum_handle_crypt(&cksum, &saved, insecure);

		abd_copy_from_buf_off(abd, &cksum,
		    eck_offset + offsetof(zio_eck_t, zec_cksum),
		    sizeof (zio_cksum_t));
	} else {
		saved = bp->blk_cksum;
		ci->ci_func[0](abd, size, spa->spa_cksum_tmpls[checksum],
		    &cksum);
		if (BP_USES_CRYPT(bp) && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
			zio_checksum_handle_crypt(&cksum, &saved, insecure);
		bp->blk_cksum = cksum;
	}
}

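/*
 * Verify a block's checksum: recompute it with the function named by
 * "checksum" and compare against the expected value (embedded in the data
 * for ZIL, gang and label blocks, stored in the bp otherwise). Returns 0
 * on success, ECKSUM on a mismatch, or EINVAL if the checksum function is
 * unusable.
 */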
int
zio_checksum_error_impl(spa_t *spa, const blkptr_t *bp,
    enum zio_checksum checksum, abd_t *abd, uint64_t size, uint64_t offset,
    zio_bad_cksum_t *info)
{
	zio_checksum_info_t *ci = &zio_checksum_table[checksum];
	zio_cksum_t actual_cksum, expected_cksum;
	zio_eck_t eck;
	int byteswap;

	if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL)
		return (SET_ERROR(EINVAL));

	zio_checksum_template_init(checksum, spa);

	IMPLY(bp == NULL, ci->ci_flags & ZCHECKSUM_FLAG_EMBEDDED);
	IMPLY(bp == NULL, checksum == ZIO_CHECKSUM_LABEL);

	if (ci->ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
		zio_cksum_t verifier;
		size_t eck_offset;

		if (checksum == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t zilc;
			uint64_t nused;

			abd_copy_to_buf(&zilc, abd, sizeof (zil_chain_t));

			eck = zilc.zc_eck;
			eck_offset = offsetof(zil_chain_t, zc_eck) +
			    offsetof(zio_eck_t, zec_cksum);

			if (eck.zec_magic == ZEC_MAGIC) {
				nused = zilc.zc_nused;
			} else if (eck.zec_magic == BSWAP_64(ZEC_MAGIC)) {
				nused = BSWAP_64(zilc.zc_nused);
			} else {
				return (SET_ERROR(ECKSUM));
			}

			nused = P2ROUNDUP_TYPED(nused, ZIL_MIN_BLKSZ, uint64_t);
			if (size < nused)
				return (SET_ERROR(ECKSUM));
			size = nused;
		} else {
			if (size < sizeof (zio_eck_t))
				return (SET_ERROR(ECKSUM));
			eck_offset = size - sizeof (zio_eck_t);
			abd_copy_to_buf_off(&eck, abd, eck_offset,
			    sizeof (zio_eck_t));
			eck_offset += offsetof(zio_eck_t, zec_cksum);
		}

		if (checksum == ZIO_CHECKSUM_GANG_HEADER)
			zio_checksum_gang_verifier(&verifier, bp);
		else if (checksum == ZIO_CHECKSUM_LABEL)
			zio_checksum_label_verifier(&verifier, offset);
		else
			verifier = bp->blk_cksum;

		byteswap = (eck.zec_magic == BSWAP_64(ZEC_MAGIC));

		if (byteswap)
			byteswap_uint64_array(&verifier, sizeof (zio_cksum_t));

		expected_cksum = eck.zec_cksum;

		abd_copy_from_buf_off(abd, &verifier, eck_offset,
		    sizeof (zio_cksum_t));

		ci->ci_func[byteswap](abd, size,
		    spa->spa_cksum_tmpls[checksum], &actual_cksum);

		abd_copy_from_buf_off(abd, &expected_cksum, eck_offset,
		    sizeof (zio_cksum_t));

		if (byteswap) {
			byteswap_uint64_array(&expected_cksum,
			    sizeof (zio_cksum_t));
		}
	} else {
		byteswap = BP_SHOULD_BYTESWAP(bp);
		expected_cksum = bp->blk_cksum;
		ci->ci_func[byteswap](abd, size,
		    spa->spa_cksum_tmpls[checksum], &actual_cksum);
	}

	/*
	 * MAC checksums are a special case since half of this checksum will
	 * actually be the encryption MAC. This will be verified by the
	 * decryption process, so we just check the truncated checksum now.
	 * Objset blocks use embedded MACs so we don't truncate the checksum
	 * for them.
	 */
	if (bp != NULL && BP_USES_CRYPT(bp) &&
	    BP_GET_TYPE(bp) != DMU_OT_OBJSET) {
		if (!(ci->ci_flags & ZCHECKSUM_FLAG_DEDUP)) {
			actual_cksum.zc_word[0] ^= actual_cksum.zc_word[2];
			actual_cksum.zc_word[1] ^= actual_cksum.zc_word[3];
		}

		actual_cksum.zc_word[2] = 0;
		actual_cksum.zc_word[3] = 0;
		expected_cksum.zc_word[2] = 0;
		expected_cksum.zc_word[3] = 0;
	}

	if (info != NULL) {
		info->zbc_checksum_name = ci->ci_name;
		info->zbc_byteswapped = byteswap;
		info->zbc_injected = 0;
		info->zbc_has_cksum = 1;
	}

	if (!ZIO_CHECKSUM_EQUAL(actual_cksum, expected_cksum))
		return (SET_ERROR(ECKSUM));

	return (0);
}

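/*
 * Checksum-verify the data attached to a zio. The checksum function, size
 * and verifier are derived from the zio's bp (or, when there is no bp,
 * e.g. for label reads, from the zio's properties). Details are reported
 * through "info", and checksum fault injection is honored here as well.
 */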
int
zio_checksum_error(zio_t *zio, zio_bad_cksum_t *info)
{
	blkptr_t *bp = zio->io_bp;
	uint_t checksum = (bp == NULL ? zio->io_prop.zp_checksum :
	    (BP_IS_GANG(bp) ? ZIO_CHECKSUM_GANG_HEADER : BP_GET_CHECKSUM(bp)));
	int error;
	uint64_t size = (bp == NULL ? zio->io_size :
	    (BP_IS_GANG(bp) ? SPA_GANGBLOCKSIZE : BP_GET_PSIZE(bp)));
	uint64_t offset = zio->io_offset;
	abd_t *data = zio->io_abd;
	spa_t *spa = zio->io_spa;

	error = zio_checksum_error_impl(spa, bp, checksum, data, size,
	    offset, info);

	if (zio_injection_enabled && error == 0 && zio->io_error == 0) {
		error = zio_handle_fault_injection(zio, ECKSUM);
		if (error != 0)
			info->zbc_injected = 1;
	}

	return (error);
}

/*
 * Called by a spa_t that's about to be deallocated. This steps through
 * all of the checksum context templates and deallocates any that were
 * initialized using the algorithm-specific template init function.
 */
void
zio_checksum_templates_free(spa_t *spa)
{
	for (enum zio_checksum checksum = 0;
	    checksum < ZIO_CHECKSUM_FUNCTIONS; checksum++) {
		if (spa->spa_cksum_tmpls[checksum] != NULL) {
			zio_checksum_info_t *ci = &zio_checksum_table[checksum];

			VERIFY(ci->ci_tmpl_free != NULL);
			ci->ci_tmpl_free(spa->spa_cksum_tmpls[checksum]);
			spa->spa_cksum_tmpls[checksum] = NULL;
		}
	}
}