/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Based on BLAKE3 v1.3.1, https://github.com/BLAKE3-team/BLAKE3
 * Copyright (c) 2019-2020 Samuel Neves and Jack O'Connor
 * Copyright (c) 2021-2022 Tino Reichardt <milky-zfs@mcmilk.de>
 */

#include <sys/simd.h>
#include <sys/zfs_context.h>
#include <sys/blake3.h>

#include "blake3_impl.h"

/*
 * blake3_compress_subtree_wide() needs a 1056 byte stack frame; this
 * pragma silences gcc's -Wframe-larger-than warning for it.
 */
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wframe-larger-than="
#endif

/* internal use only */
typedef struct {
	uint32_t input_cv[8];
	uint64_t counter;
	uint8_t block[BLAKE3_BLOCK_LEN];
	uint8_t block_len;
	uint8_t flags;
} output_t;

/* internal flags */
enum blake3_flags {
	CHUNK_START		= 1 << 0,
	CHUNK_END		= 1 << 1,
	PARENT			= 1 << 2,
	ROOT			= 1 << 3,
	KEYED_HASH		= 1 << 4,
	DERIVE_KEY_CONTEXT	= 1 << 5,
	DERIVE_KEY_MATERIAL	= 1 << 6,
};
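
/*
 * Illustrative note (not from the upstream sources): these flags are
 * OR-ed together per compression. For example, a message that fits in
 * a single block is compressed once with CHUNK_START | CHUNK_END |
 * ROOT, while an interior parent node is compressed with just PARENT
 * (plus ROOT if it turns out to be the root, and KEYED_HASH throughout
 * if the hasher was keyed).
 */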

/* internal start */
static void chunk_state_init(blake3_chunk_state_t *ctx,
    const uint32_t key[8], uint8_t flags)
{
	memcpy(ctx->cv, key, BLAKE3_KEY_LEN);
	ctx->chunk_counter = 0;
	memset(ctx->buf, 0, BLAKE3_BLOCK_LEN);
	ctx->buf_len = 0;
	ctx->blocks_compressed = 0;
	ctx->flags = flags;
}

static void chunk_state_reset(blake3_chunk_state_t *ctx,
    const uint32_t key[8], uint64_t chunk_counter)
{
	memcpy(ctx->cv, key, BLAKE3_KEY_LEN);
	ctx->chunk_counter = chunk_counter;
	ctx->blocks_compressed = 0;
	memset(ctx->buf, 0, BLAKE3_BLOCK_LEN);
	ctx->buf_len = 0;
}

static size_t chunk_state_len(const blake3_chunk_state_t *ctx)
{
	return (BLAKE3_BLOCK_LEN * (size_t)ctx->blocks_compressed) +
	    ((size_t)ctx->buf_len);
}

static size_t chunk_state_fill_buf(blake3_chunk_state_t *ctx,
    const uint8_t *input, size_t input_len)
{
	size_t take = BLAKE3_BLOCK_LEN - ((size_t)ctx->buf_len);
	if (take > input_len) {
		take = input_len;
	}
	uint8_t *dest = ctx->buf + ((size_t)ctx->buf_len);
	memcpy(dest, input, take);
	ctx->buf_len += (uint8_t)take;
	return (take);
}

static uint8_t chunk_state_maybe_start_flag(const blake3_chunk_state_t *ctx)
{
	if (ctx->blocks_compressed == 0) {
		return (CHUNK_START);
	} else {
		return (0);
	}
}

static output_t make_output(const uint32_t input_cv[8],
    const uint8_t *block, uint8_t block_len,
    uint64_t counter, uint8_t flags)
{
	output_t ret;
	memcpy(ret.input_cv, input_cv, 32);
	memcpy(ret.block, block, BLAKE3_BLOCK_LEN);
	ret.block_len = block_len;
	ret.counter = counter;
	ret.flags = flags;
	return (ret);
}

/*
 * Chaining values within a given chunk (specifically the compress_in_place
 * interface) are represented as words. This avoids unnecessary bytes<->words
 * conversion overhead in the portable implementation. However, the hash_many
 * interface handles both user input and parent node blocks, so it accepts
 * bytes. For that reason, chaining values in the CV stack are represented as
 * bytes.
 */
static void output_chaining_value(const blake3_ops_t *ops,
    const output_t *ctx, uint8_t cv[32])
{
	uint32_t cv_words[8];
	memcpy(cv_words, ctx->input_cv, 32);
	ops->compress_in_place(cv_words, ctx->block, ctx->block_len,
	    ctx->counter, ctx->flags);
	store_cv_words(cv, cv_words);
}

static void output_root_bytes(const blake3_ops_t *ops, const output_t *ctx,
    uint64_t seek, uint8_t *out, size_t out_len)
{
	uint64_t output_block_counter = seek / 64;
	size_t offset_within_block = seek % 64;
	uint8_t wide_buf[64];
	while (out_len > 0) {
		ops->compress_xof(ctx->input_cv, ctx->block, ctx->block_len,
		    output_block_counter, ctx->flags | ROOT, wide_buf);
		size_t available_bytes = 64 - offset_within_block;
		size_t memcpy_len;
		if (out_len > available_bytes) {
			memcpy_len = available_bytes;
		} else {
			memcpy_len = out_len;
		}
		memcpy(out, wide_buf + offset_within_block, memcpy_len);
		out += memcpy_len;
		out_len -= memcpy_len;
		output_block_counter += 1;
		offset_within_block = 0;
	}
}

static void chunk_state_update(const blake3_ops_t *ops,
    blake3_chunk_state_t *ctx, const uint8_t *input, size_t input_len)
{
	if (ctx->buf_len > 0) {
		size_t take = chunk_state_fill_buf(ctx, input, input_len);
		input += take;
		input_len -= take;
		if (input_len > 0) {
			ops->compress_in_place(ctx->cv, ctx->buf,
			    BLAKE3_BLOCK_LEN, ctx->chunk_counter,
			    ctx->flags|chunk_state_maybe_start_flag(ctx));
			ctx->blocks_compressed += 1;
			ctx->buf_len = 0;
			memset(ctx->buf, 0, BLAKE3_BLOCK_LEN);
		}
	}

	while (input_len > BLAKE3_BLOCK_LEN) {
		ops->compress_in_place(ctx->cv, input, BLAKE3_BLOCK_LEN,
		    ctx->chunk_counter,
		    ctx->flags|chunk_state_maybe_start_flag(ctx));
		ctx->blocks_compressed += 1;
		input += BLAKE3_BLOCK_LEN;
		input_len -= BLAKE3_BLOCK_LEN;
	}

	chunk_state_fill_buf(ctx, input, input_len);
}

static output_t chunk_state_output(const blake3_chunk_state_t *ctx)
{
	uint8_t block_flags =
	    ctx->flags | chunk_state_maybe_start_flag(ctx) | CHUNK_END;
	return (make_output(ctx->cv, ctx->buf, ctx->buf_len,
	    ctx->chunk_counter, block_flags));
}

static output_t parent_output(const uint8_t block[BLAKE3_BLOCK_LEN],
    const uint32_t key[8], uint8_t flags)
{
	return (make_output(key, block, BLAKE3_BLOCK_LEN, 0, flags | PARENT));
}

/*
 * Given some input larger than one chunk, return the number of bytes that
 * should go in the left subtree. This is the largest power-of-2 number of
 * chunks that leaves at least 1 byte for the right subtree.
 */
static size_t left_len(size_t content_len)
{
	/*
	 * Subtract 1 to reserve at least one byte for the right side.
	 * content_len should always be greater than BLAKE3_CHUNK_LEN.
	 */
	size_t full_chunks = (content_len - 1) / BLAKE3_CHUNK_LEN;
	return (round_down_to_power_of_2(full_chunks) * BLAKE3_CHUNK_LEN);
}
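
/*
 * Worked example (illustrative): with BLAKE3_CHUNK_LEN == 1024, an
 * input of 5121 bytes gives full_chunks = (5121 - 1) / 1024 = 5, which
 * rounds down to 4, so left_len() returns 4096. The left subtree gets
 * exactly 4 chunks and the remaining 1025 bytes go to the right.
 */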

/*
 * Use SIMD parallelism to hash up to MAX_SIMD_DEGREE chunks at the same time
 * on a single thread. Write out the chunk chaining values and return the
 * number of chunks hashed. These chunks are never the root and never empty;
 * those cases use a different codepath.
 */
static size_t compress_chunks_parallel(const blake3_ops_t *ops,
    const uint8_t *input, size_t input_len, const uint32_t key[8],
    uint64_t chunk_counter, uint8_t flags, uint8_t *out)
{
	const uint8_t *chunks_array[MAX_SIMD_DEGREE];
	size_t input_position = 0;
	size_t chunks_array_len = 0;
	while (input_len - input_position >= BLAKE3_CHUNK_LEN) {
		chunks_array[chunks_array_len] = &input[input_position];
		input_position += BLAKE3_CHUNK_LEN;
		chunks_array_len += 1;
	}

	ops->hash_many(chunks_array, chunks_array_len, BLAKE3_CHUNK_LEN /
	    BLAKE3_BLOCK_LEN, key, chunk_counter, B_TRUE, flags, CHUNK_START,
	    CHUNK_END, out);

	/*
	 * Hash the remaining partial chunk, if there is one. Note that the
	 * empty chunk (meaning the empty message) is a different codepath.
	 */
	if (input_len > input_position) {
		uint64_t counter = chunk_counter + (uint64_t)chunks_array_len;
		blake3_chunk_state_t chunk_state;
		chunk_state_init(&chunk_state, key, flags);
		chunk_state.chunk_counter = counter;
		chunk_state_update(ops, &chunk_state, &input[input_position],
		    input_len - input_position);
		output_t output = chunk_state_output(&chunk_state);
		output_chaining_value(ops, &output, &out[chunks_array_len *
		    BLAKE3_OUT_LEN]);
		return (chunks_array_len + 1);
	} else {
		return (chunks_array_len);
	}
}

/*
 * Use SIMD parallelism to hash up to MAX_SIMD_DEGREE parents at the same time
 * on a single thread. Write out the parent chaining values and return the
 * number of parents hashed. (If there's an odd input chaining value left over,
 * return it as an additional output.) These parents are never the root and
 * never empty; those cases use a different codepath.
 */
static size_t compress_parents_parallel(const blake3_ops_t *ops,
    const uint8_t *child_chaining_values, size_t num_chaining_values,
    const uint32_t key[8], uint8_t flags, uint8_t *out)
{
	const uint8_t *parents_array[MAX_SIMD_DEGREE_OR_2] = {0};
	size_t parents_array_len = 0;

	while (num_chaining_values - (2 * parents_array_len) >= 2) {
		parents_array[parents_array_len] = &child_chaining_values[2 *
		    parents_array_len * BLAKE3_OUT_LEN];
		parents_array_len += 1;
	}

	ops->hash_many(parents_array, parents_array_len, 1, key, 0, B_FALSE,
	    flags | PARENT, 0, 0, out);

	/* If there's an odd child left over, it becomes an output. */
	if (num_chaining_values > 2 * parents_array_len) {
		memcpy(&out[parents_array_len * BLAKE3_OUT_LEN],
		    &child_chaining_values[2 * parents_array_len *
		    BLAKE3_OUT_LEN], BLAKE3_OUT_LEN);
		return (parents_array_len + 1);
	} else {
		return (parents_array_len);
	}
}
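
/*
 * Worked example (illustrative): given num_chaining_values == 5, the
 * pairing loop in compress_parents_parallel() combines CVs 0+1 and
 * 2+3 into two parent nodes, CV 4 is copied through unmodified, and
 * the function returns 3. Repeated calls therefore roughly halve the
 * CV count per pass: 5 -> 3 -> 2.
 */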

/*
 * The wide helper function returns (writes out) an array of chaining values
 * and returns the length of that array. The number of chaining values returned
 * is the dynamically detected SIMD degree, at most MAX_SIMD_DEGREE. Or fewer,
 * if the input is shorter than that many chunks. The reason for maintaining a
 * wide array of chaining values going back up the tree is to allow the
 * implementation to hash as many parents in parallel as possible.
 *
 * As a special case when the SIMD degree is 1, this function will still return
 * at least 2 outputs. This guarantees that this function doesn't perform the
 * root compression. (If it did, it would use the wrong flags, and also we
 * wouldn't be able to implement extendable output.) Note that this function is
 * not used when the whole input is only 1 chunk long; that's a different
 * codepath.
 *
 * Why not just have the caller split the input on the first update(), instead
 * of implementing this special rule? Because we don't want to limit SIMD or
 * multi-threading parallelism for that update().
 */
static size_t blake3_compress_subtree_wide(const blake3_ops_t *ops,
    const uint8_t *input, size_t input_len, const uint32_t key[8],
    uint64_t chunk_counter, uint8_t flags, uint8_t *out)
{
	/*
	 * Note that the single chunk case does *not* bump the SIMD degree up
	 * to 2 when it is 1. If this implementation adds multi-threading in
	 * the future, this gives us the option of multi-threading even the
	 * 2-chunk case, which can help performance on smaller platforms.
	 */
	if (input_len <= (size_t)(ops->degree * BLAKE3_CHUNK_LEN)) {
		return (compress_chunks_parallel(ops, input, input_len, key,
		    chunk_counter, flags, out));
	}

	/*
	 * With more than simd_degree chunks, we need to recurse. Start by
	 * dividing the input into left and right subtrees. (Note that this is
	 * only optimal as long as the SIMD degree is a power of 2. If we ever
	 * get a SIMD degree of 3 or something, we'll need a more complicated
	 * strategy.)
	 */
	size_t left_input_len = left_len(input_len);
	size_t right_input_len = input_len - left_input_len;
	const uint8_t *right_input = &input[left_input_len];
	uint64_t right_chunk_counter = chunk_counter +
	    (uint64_t)(left_input_len / BLAKE3_CHUNK_LEN);

	/*
	 * Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2
	 * to account for the special case of returning 2 outputs when the
	 * SIMD degree is 1.
	 */
	uint8_t cv_array[2 * MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN];
	size_t degree = ops->degree;
	if (left_input_len > BLAKE3_CHUNK_LEN && degree == 1) {
		/*
		 * The special case: We always use a degree of at least two,
		 * to make sure there are two outputs. Except, as noted above,
		 * at the chunk level, where we allow degree=1. (Note that the
		 * 1-chunk-input case is a different codepath.)
		 */
		degree = 2;
	}
	uint8_t *right_cvs = &cv_array[degree * BLAKE3_OUT_LEN];

	/*
	 * Recurse! If this implementation adds multi-threading support in the
	 * future, this is where it will go.
	 */
	size_t left_n = blake3_compress_subtree_wide(ops, input,
	    left_input_len, key, chunk_counter, flags, cv_array);
	size_t right_n = blake3_compress_subtree_wide(ops, right_input,
	    right_input_len, key, right_chunk_counter, flags, right_cvs);

	/*
	 * The special case again. If simd_degree=1, then we'll have left_n=1
	 * and right_n=1. Rather than compressing them into a single output,
	 * return them directly, to make sure we always have at least two
	 * outputs.
	 */
	if (left_n == 1) {
		memcpy(out, cv_array, 2 * BLAKE3_OUT_LEN);
		return (2);
	}

	/* Otherwise, do one layer of parent node compression. */
	size_t num_chaining_values = left_n + right_n;
	return (compress_parents_parallel(ops, cv_array,
	    num_chaining_values, key, flags, out));
}
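
/*
 * Worked example (illustrative): with a SIMD degree of 4 and a 6-chunk
 * input, blake3_compress_subtree_wide() recurses once. left_len()
 * splits off 4 chunks, which compress_chunks_parallel() hashes into 4
 * CVs in one pass; the right subtree's 2 chunks yield 2 more CVs. The
 * final compress_parents_parallel() pass then reduces those 6 CVs to 3
 * parent CVs, which is what this call writes out.
 */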

/*
 * Hash a subtree with compress_subtree_wide(), and then condense the resulting
 * list of chaining values down to a single parent node. Don't compress that
 * last parent node, however. Instead, return its message bytes (the
 * concatenated chaining values of its children). This is necessary when the
 * first call to update() supplies a complete subtree, because the topmost
 * parent node of that subtree could end up being the root. It's also necessary
 * for extended output in the general case.
 *
 * As with compress_subtree_wide(), this function is not used on inputs of 1
 * chunk or less. That's a different codepath.
 */
static void compress_subtree_to_parent_node(const blake3_ops_t *ops,
    const uint8_t *input, size_t input_len, const uint32_t key[8],
    uint64_t chunk_counter, uint8_t flags, uint8_t out[2 * BLAKE3_OUT_LEN])
{
	uint8_t cv_array[MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN];
	size_t num_cvs = blake3_compress_subtree_wide(ops, input, input_len,
	    key, chunk_counter, flags, cv_array);

	/*
	 * If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
	 * compress_subtree_wide() returns more than 2 chaining values.
	 * Condense them into 2 by forming parent nodes repeatedly.
	 */
	uint8_t out_array[MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN / 2];
	while (num_cvs > 2) {
		num_cvs = compress_parents_parallel(ops, cv_array, num_cvs,
		    key, flags, out_array);
		memcpy(cv_array, out_array, num_cvs * BLAKE3_OUT_LEN);
	}
	memcpy(out, cv_array, 2 * BLAKE3_OUT_LEN);
}

static void hasher_init_base(BLAKE3_CTX *ctx, const uint32_t key[8],
    uint8_t flags)
{
	memcpy(ctx->key, key, BLAKE3_KEY_LEN);
	chunk_state_init(&ctx->chunk, key, flags);
	ctx->cv_stack_len = 0;
	ctx->ops = blake3_get_ops();
}

/*
 * As described in hasher_push_cv() below, we do "lazy merging", delaying
 * merges until right before the next CV is about to be added. This is
 * different from the reference implementation. Another difference is that we
 * aren't always merging 1 chunk at a time. Instead, each CV might represent
 * any power-of-two number of chunks, as long as the smaller-above-larger
 * stack order is maintained. Instead of the "count the trailing 0-bits"
 * algorithm described in the spec, we use a "count the total number of
 * 1-bits" variant that doesn't require us to retain the subtree size of the
 * CV on top of the stack. The principle is the same: each CV that should
 * remain in the stack is represented by a 1-bit in the total number of chunks
 * (or bytes) so far.
 */
static void hasher_merge_cv_stack(BLAKE3_CTX *ctx, uint64_t total_len)
{
	size_t post_merge_stack_len = (size_t)popcnt(total_len);
	while (ctx->cv_stack_len > post_merge_stack_len) {
		uint8_t *parent_node =
		    &ctx->cv_stack[(ctx->cv_stack_len - 2) * BLAKE3_OUT_LEN];
		output_t output =
		    parent_output(parent_node, ctx->key, ctx->chunk.flags);
		output_chaining_value(ctx->ops, &output, parent_node);
		ctx->cv_stack_len -= 1;
	}
}
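
/*
 * Worked example (illustrative): after six chunks the stack can hold
 * CVs covering 4, 1, and 1 chunks (smallest on top). When the seventh
 * chunk's CV is about to be pushed, hasher_push_cv() first calls this
 * function with total_len == 6; popcnt(6) == 2 (binary 110), so the
 * top two 1-chunk CVs merge into one 2-chunk parent, leaving [4, 2]
 * before the new CV lands on top.
 */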

/*
 * In reference_impl.rs, we merge the new CV with existing CVs from the stack
 * before pushing it. We can do that because we know more input is coming, so
 * we know none of the merges are root.
 *
 * This setting is different. We want to feed as much input as possible to
 * compress_subtree_wide(), without setting aside anything for the chunk_state.
 * If the user gives us 64 KiB, we want to parallelize over all 64 KiB at once
 * as a single subtree, if at all possible.
 *
 * This leads to two problems:
 * 1) This 64 KiB input might be the only call that ever gets made to update.
 *    In this case, the root node of the 64 KiB subtree would be the root node
 *    of the whole tree, and it would need to be ROOT finalized. We can't
 *    compress it until we know.
 * 2) This 64 KiB input might complete a larger tree, whose root node is
 *    similarly going to be the root of the whole tree. For example, maybe
 *    we have 192 KiB (that is, 128 + 64) hashed so far. We can't compress the
 *    node at the root of the 256 KiB subtree until we know how to finalize it.
 *
 * The second problem is solved with "lazy merging". That is, when we're about
 * to add a CV to the stack, we don't merge it with anything first, as the
 * reference impl does. Instead we do merges using the *previous* CV that was
 * added, which is sitting on top of the stack, and we put the new CV
 * (unmerged) on top of the stack afterwards. This guarantees that we never
 * merge the root node until finalize().
 *
 * Solving the first problem requires an additional tool,
 * compress_subtree_to_parent_node(). That function always returns the top
 * *two* chaining values of the subtree it's compressing. We then do lazy
 * merging with each of them separately, so that the second CV will always
 * remain unmerged. (That also helps us support extendable output when we're
 * hashing an input all-at-once.)
 */
static void hasher_push_cv(BLAKE3_CTX *ctx, uint8_t new_cv[BLAKE3_OUT_LEN],
    uint64_t chunk_counter)
{
	hasher_merge_cv_stack(ctx, chunk_counter);
	memcpy(&ctx->cv_stack[ctx->cv_stack_len * BLAKE3_OUT_LEN], new_cv,
	    BLAKE3_OUT_LEN);
	ctx->cv_stack_len += 1;
}

void
Blake3_Init(BLAKE3_CTX *ctx)
{
	hasher_init_base(ctx, BLAKE3_IV, 0);
}

void
Blake3_InitKeyed(BLAKE3_CTX *ctx, const uint8_t key[BLAKE3_KEY_LEN])
{
	uint32_t key_words[8];
	load_key_words(key, key_words);
	hasher_init_base(ctx, key_words, KEYED_HASH);
}
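
/*
 * A minimal usage sketch (illustrative; data/data_len are
 * caller-supplied):
 *
 *	uint8_t digest[BLAKE3_OUT_LEN];
 *	BLAKE3_CTX ctx;
 *
 *	Blake3_Init(&ctx);
 *	Blake3_Update(&ctx, data, data_len);
 *	Blake3_Final(&ctx, digest);
 *
 * Blake3_InitKeyed() takes a 32-byte key instead for the keyed-hashing
 * mode.
 */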

static void
Blake3_Update2(BLAKE3_CTX *ctx, const void *input, size_t input_len)
{
	/*
	 * Explicitly checking for zero avoids causing UB by passing a null
	 * pointer to memcpy. This comes up in practice with things like:
	 *   std::vector<uint8_t> v;
	 *   blake3_hasher_update(&hasher, v.data(), v.size());
	 */
	if (input_len == 0) {
		return;
	}

	const uint8_t *input_bytes = (const uint8_t *)input;

	/*
	 * If we have some partial chunk bytes in the internal chunk_state, we
	 * need to finish that chunk first.
	 */
	if (chunk_state_len(&ctx->chunk) > 0) {
		size_t take = BLAKE3_CHUNK_LEN - chunk_state_len(&ctx->chunk);
		if (take > input_len) {
			take = input_len;
		}
		chunk_state_update(ctx->ops, &ctx->chunk, input_bytes, take);
		input_bytes += take;
		input_len -= take;
		/*
		 * If we've filled the current chunk and there's more coming,
		 * finalize this chunk and proceed. In this case we know it's
		 * not the root.
		 */
		if (input_len > 0) {
			output_t output = chunk_state_output(&ctx->chunk);
			uint8_t chunk_cv[32];
			output_chaining_value(ctx->ops, &output, chunk_cv);
			hasher_push_cv(ctx, chunk_cv, ctx->chunk.chunk_counter);
			chunk_state_reset(&ctx->chunk, ctx->key,
			    ctx->chunk.chunk_counter + 1);
		} else {
			return;
		}
	}

	/*
	 * Now the chunk_state is clear, and we have more input. If there's
	 * more than a single chunk (so, definitely not the root chunk), hash
	 * the largest whole subtree we can, with the full benefits of SIMD
	 * (and maybe in the future, multi-threading) parallelism. Two
	 * restrictions:
	 * - The subtree has to be a power-of-2 number of chunks. Only
	 *   subtrees along the right edge can be incomplete, and we don't
	 *   know where the right edge is going to be until we get to
	 *   finalize().
	 * - The subtree must evenly divide the total number of chunks up
	 *   until this point (if total is not 0). If the current incomplete
	 *   subtree is only waiting for 1 more chunk, we can't hash a subtree
	 *   of 4 chunks. We have to complete the current subtree first.
	 * Because we might need to break up the input to form powers of 2, or
	 * to evenly divide what we already have, this part runs in a loop.
	 */
	while (input_len > BLAKE3_CHUNK_LEN) {
		size_t subtree_len = round_down_to_power_of_2(input_len);
		uint64_t count_so_far =
		    ctx->chunk.chunk_counter * BLAKE3_CHUNK_LEN;
		/*
		 * Shrink the subtree_len until it evenly divides the count so
		 * far. We know that subtree_len itself is a power of 2, so we
		 * can use a bitmasking trick instead of an actual remainder
		 * operation. (Note that if the caller consistently passes
		 * power-of-2 inputs of the same size, as is hopefully
		 * typical, this loop condition will always fail, and
		 * subtree_len will always be the full length of the input.)
		 *
		 * An aside: We don't have to shrink subtree_len quite this
		 * much. For example, if count_so_far is 1, we could pass 2
		 * chunks to compress_subtree_to_parent_node. Since we'll get
		 * 2 CVs back, we'll still get the right answer in the end,
		 * and we might get to use 2-way SIMD parallelism. The problem
		 * with this optimization is that it gets us stuck always
		 * hashing 2 chunks. The total number of chunks will remain
		 * odd, and we'll never graduate to higher degrees of
		 * parallelism. See
		 * https://github.com/BLAKE3-team/BLAKE3/issues/69.
		 */
		while ((((uint64_t)(subtree_len - 1)) & count_so_far) != 0) {
			subtree_len /= 2;
		}
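
		/*
		 * Worked example (illustrative): with 3 chunks (3072 bytes)
		 * hashed so far and 8192 bytes of fresh input, subtree_len
		 * starts at 8192. The mask check fails for 8192, 4096, and
		 * 2048 (each mask keeps a bit of 3072 = 0xC00 set), so
		 * subtree_len shrinks to 1024: one chunk, just enough to
		 * complete the pending 4-chunk subtree.
		 */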

		/*
		 * The shrunken subtree_len might now be 1 chunk long. If so,
		 * hash that one chunk by itself. Otherwise, compress the
		 * subtree into a pair of CVs.
		 */
		uint64_t subtree_chunks = subtree_len / BLAKE3_CHUNK_LEN;
		if (subtree_len <= BLAKE3_CHUNK_LEN) {
			blake3_chunk_state_t chunk_state;
			chunk_state_init(&chunk_state, ctx->key,
			    ctx->chunk.flags);
			chunk_state.chunk_counter = ctx->chunk.chunk_counter;
			chunk_state_update(ctx->ops, &chunk_state, input_bytes,
			    subtree_len);
			output_t output = chunk_state_output(&chunk_state);
			uint8_t cv[BLAKE3_OUT_LEN];
			output_chaining_value(ctx->ops, &output, cv);
			hasher_push_cv(ctx, cv, chunk_state.chunk_counter);
		} else {
			/*
			 * This is the high-performance happy path, though
			 * getting here depends on the caller giving us a long
			 * enough input.
			 */
			uint8_t cv_pair[2 * BLAKE3_OUT_LEN];
			compress_subtree_to_parent_node(ctx->ops, input_bytes,
			    subtree_len, ctx->key, ctx->chunk.chunk_counter,
			    ctx->chunk.flags, cv_pair);
			hasher_push_cv(ctx, cv_pair, ctx->chunk.chunk_counter);
			hasher_push_cv(ctx, &cv_pair[BLAKE3_OUT_LEN],
			    ctx->chunk.chunk_counter + (subtree_chunks / 2));
		}
		ctx->chunk.chunk_counter += subtree_chunks;
		input_bytes += subtree_len;
		input_len -= subtree_len;
	}

	/*
	 * If there's any remaining input less than a full chunk, add it to
	 * the chunk state. In that case, also do a final merge loop to make
	 * sure the subtree stack doesn't contain any unmerged pairs. The
	 * remaining input means we know these merges are non-root. This merge
	 * loop isn't strictly necessary here, because hasher_push_cv()
	 * already does its own merge loop, but it simplifies
	 * Blake3_FinalSeek() below.
	 */
	if (input_len > 0) {
		chunk_state_update(ctx->ops, &ctx->chunk, input_bytes,
		    input_len);
		hasher_merge_cv_stack(ctx, ctx->chunk.chunk_counter);
	}
}

void
Blake3_Update(BLAKE3_CTX *ctx, const void *input, size_t todo)
{
	size_t done = 0;
	const uint8_t *data = input;
	const size_t block_max = 1024 * 64;

	/* Feed at most 64 KiB at a time to keep the stack usage small. */
	while (todo != 0) {
		size_t block = (todo >= block_max) ? block_max : todo;
		Blake3_Update2(ctx, data + done, block);
		done += block;
		todo -= block;
	}
}

void
Blake3_Final(const BLAKE3_CTX *ctx, uint8_t *out)
{
	Blake3_FinalSeek(ctx, 0, out, BLAKE3_OUT_LEN);
}
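
/*
 * Illustrative note: the seek parameter below selects a byte offset
 * into BLAKE3's extendable output stream, so
 *
 *	Blake3_FinalSeek(&ctx, 0, buf, 64);
 *
 * produces the same bytes as two calls with (seek 0, len 32) and
 * (seek 32, len 32). The first BLAKE3_OUT_LEN bytes of that stream are
 * the regular digest that Blake3_Final() returns.
 */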

void
Blake3_FinalSeek(const BLAKE3_CTX *ctx, uint64_t seek, uint8_t *out,
    size_t out_len)
{
	/*
	 * Explicitly checking for zero avoids causing UB by passing a null
	 * pointer to memcpy. This comes up in practice with things like:
	 *   std::vector<uint8_t> v;
	 *   blake3_hasher_finalize(&hasher, v.data(), v.size());
	 */
	if (out_len == 0) {
		return;
	}
	/* If the subtree stack is empty, then the current chunk is the root. */
	if (ctx->cv_stack_len == 0) {
		output_t output = chunk_state_output(&ctx->chunk);
		output_root_bytes(ctx->ops, &output, seek, out, out_len);
		return;
	}
	/*
	 * If there are any bytes in the chunk state, finalize that chunk and
	 * do a roll-up merge between that chunk hash and every subtree in the
	 * stack. In this case, the extra merge loop at the end of
	 * Blake3_Update2() guarantees that none of the subtrees in the stack
	 * need to be merged with each other first. Otherwise, if there are no
	 * bytes in the chunk state, then the top of the stack is a chunk
	 * hash, and we start the merge from that.
	 */
	output_t output;
	size_t cvs_remaining;
	if (chunk_state_len(&ctx->chunk) > 0) {
		cvs_remaining = ctx->cv_stack_len;
		output = chunk_state_output(&ctx->chunk);
	} else {
		/* There are always at least 2 CVs in the stack in this case. */
		cvs_remaining = ctx->cv_stack_len - 2;
		output = parent_output(&ctx->cv_stack[cvs_remaining * 32],
		    ctx->key, ctx->chunk.flags);
	}
	while (cvs_remaining > 0) {
		cvs_remaining -= 1;
		uint8_t parent_block[BLAKE3_BLOCK_LEN];
		memcpy(parent_block, &ctx->cv_stack[cvs_remaining * 32], 32);
		output_chaining_value(ctx->ops, &output, &parent_block[32]);
		output = parent_output(parent_block, ctx->key,
		    ctx->chunk.flags);
	}
	output_root_bytes(ctx->ops, &output, seek, out, out_len);
}
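
/*
 * Worked example (illustrative): with an empty chunk state and a CV
 * stack of [A, B, C] (C on top), the roll-up in Blake3_FinalSeek()
 * starts from the parent of B and C, then folds A in as the left half
 * of one more parent block, and finally ROOT-compresses that topmost
 * parent via output_root_bytes().
 */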