Lines Matching full:chunk

127  * Chaining values within a given chunk (specifically the compress_in_place
212 * Given some input larger than one chunk, return the number of bytes that
229 * on a single thread. Write out the chunk chaining values and return the
251 * Hash the remaining partial chunk, if there is one. Note that the in compress_chunks_parallel()
252 * empty chunk (meaning the empty message) is a different codepath. in compress_chunks_parallel()
316 * not used when the whole input is only 1 chunk long; that's a different
328 * Note that the single chunk case does *not* bump the SIMD degree up in blake3_compress_subtree_wide()
331 * 2-chunk case, which can help performance on smaller platforms. in blake3_compress_subtree_wide()
364 * at the chunk level, where we allow degree=1. (Note that the in blake3_compress_subtree_wide()
365 * 1-chunk-input case is a different codepath.) in blake3_compress_subtree_wide()
407 * chunk or less. That's a different codepath.
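
The hit at source line 212 is the start of the comment describing how input larger than one chunk is split between the left and right subtrees before whole chunks are hashed in parallel. As a rough sketch of that split rule (the helper names here are illustrative, not taken from the file above), the left subtree takes the largest power-of-two number of whole chunks that still leaves at least one byte for the right subtree:

#include <stddef.h>
#include <stdio.h>

#define	BLAKE3_CHUNK_LEN	1024

/*
 * Largest power of two less than or equal to x (x must be nonzero).
 * Illustrative helper, not part of the listed source.
 */
static size_t
round_down_to_pow2(size_t x)
{
	size_t p = 1;

	while ((p << 1) != 0 && (p << 1) <= x)
		p <<= 1;
	return (p);
}

/*
 * For input larger than one chunk, the left subtree gets the largest
 * power-of-two number of whole chunks that leaves at least one byte
 * for the right subtree.
 */
static size_t
left_subtree_len(size_t content_len)
{
	size_t full_chunks = (content_len - 1) / BLAKE3_CHUNK_LEN;

	return (round_down_to_pow2(full_chunks) * BLAKE3_CHUNK_LEN);
}

int
main(void)
{
	/* Five chunks of input: four go left, one goes right. */
	printf("%zu\n", left_subtree_len(5 * (size_t)BLAKE3_CHUNK_LEN));
	return (0);
}
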
435 chunk_state_init(&ctx->chunk, key, flags); in hasher_init_base()
444 * aren't always merging 1 chunk at a time. Instead, each CV might represent
460 parent_output(parent_node, ctx->key, ctx->chunk.flags); in hasher_merge_cv_stack()
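
The two hits above come from hasher_merge_cv_stack(): because whole subtrees are hashed at once, each chaining value on the stack can stand for a power-of-two number of chunks, and after N chunks the stack is merged down to one CV per set bit of N. Below is a toy model of that invariant, with CVs reduced to integers and a placeholder standing in for the real parent compression (all names are illustrative, not from the listed file):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder mix standing in for the real parent compression. */
static uint64_t
toy_parent(uint64_t left, uint64_t right)
{
	return (left * 31 + right);	/* not BLAKE3 */
}

static size_t
popcount64(uint64_t x)
{
	size_t n = 0;

	while (x != 0) {
		n += (size_t)(x & 1);
		x >>= 1;
	}
	return (n);
}

/*
 * Merge the top of the stack until its length matches the number of set
 * bits in the total chunk count: each surviving CV covers a power-of-two
 * number of chunks.
 */
static void
merge_cv_stack(uint64_t *cv_stack, size_t *cv_stack_len, uint64_t total_chunks)
{
	size_t post_merge_len = popcount64(total_chunks);

	while (*cv_stack_len > post_merge_len) {
		cv_stack[*cv_stack_len - 2] = toy_parent(
		    cv_stack[*cv_stack_len - 2], cv_stack[*cv_stack_len - 1]);
		*cv_stack_len -= 1;
	}
}

int
main(void)
{
	uint64_t stack[8] = { 1, 2, 3, 4 };
	size_t len = 4;

	/* Four single-chunk CVs, total of 4 chunks: popcount(4) == 1. */
	merge_cv_stack(stack, &len, 4);
	printf("%zu\n", len);	/* prints 1 */
	return (0);
}
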
539 * If we have some partial chunk bytes in the internal chunk_state, we in Blake3_Update2()
540 * need to finish that chunk first. in Blake3_Update2()
542 if (chunk_state_len(&ctx->chunk) > 0) { in Blake3_Update2()
543 size_t take = BLAKE3_CHUNK_LEN - chunk_state_len(&ctx->chunk); in Blake3_Update2()
547 chunk_state_update(ctx->ops, &ctx->chunk, input_bytes, take); in Blake3_Update2()
551 * If we've filled the current chunk and there's more coming, in Blake3_Update2()
552 * finalize this chunk and proceed. In this case we know it's in Blake3_Update2()
556 output_t output = chunk_state_output(&ctx->chunk); in Blake3_Update2()
559 hasher_push_cv(ctx, chunk_cv, ctx->chunk.chunk_counter); in Blake3_Update2()
560 chunk_state_reset(&ctx->chunk, ctx->key, in Blake3_Update2()
561 ctx->chunk.chunk_counter + 1); in Blake3_Update2()
569 * more than a single chunk (so, definitely not the root chunk), hash in Blake3_Update2()
578 * subtree is only waiting for 1 more chunk, we can't hash a subtree in Blake3_Update2()
586 ctx->chunk.chunk_counter * BLAKE3_CHUNK_LEN; in Blake3_Update2()
611 * The shrunken subtree_len might now be 1 chunk long. If so, in Blake3_Update2()
612 * hash that one chunk by itself. Otherwise, compress the in Blake3_Update2()
619 ctx->chunk.flags); in Blake3_Update2()
620 chunk_state.chunk_counter = ctx->chunk.chunk_counter; in Blake3_Update2()
635 subtree_len, ctx->key, ctx->chunk.chunk_counter, in Blake3_Update2()
636 ctx->chunk.flags, cv_pair); in Blake3_Update2()
637 hasher_push_cv(ctx, cv_pair, ctx->chunk.chunk_counter); in Blake3_Update2()
639 ctx->chunk.chunk_counter + (subtree_chunks / 2)); in Blake3_Update2()
641 ctx->chunk.chunk_counter += subtree_chunks; in Blake3_Update2()
647 * If there's any remaining input less than a full chunk, add it to in Blake3_Update2()
648 * the chunk state. In that case, also do a final merge loop to make in Blake3_Update2()
656 chunk_state_update(ctx->ops, &ctx->chunk, input_bytes, in Blake3_Update2()
658 hasher_merge_cv_stack(ctx, ctx->chunk.chunk_counter); in Blake3_Update2()
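
The Blake3_Update2() hits above outline the update path: finish any partially filled chunk first, finalize it only when more input follows (so it cannot be the root chunk), hash whole subtrees, and buffer whatever is left. The sketch below models only that byte accounting; the real code also compresses blocks, carries keys and flags, and hashes multi-chunk subtrees rather than one chunk at a time (all names are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define	CHUNK_LEN	1024

/*
 * Simplified stand-in for the real chunk state; the actual structure also
 * holds the key, flags, and block buffer.
 */
typedef struct {
	uint64_t chunk_counter;	/* which chunk we are filling */
	size_t buf_len;		/* bytes buffered in the current chunk */
} toy_chunk_state_t;

static void
push_cv(uint64_t chunk_counter)
{
	/* Real code pushes a 32-byte chaining value onto the CV stack. */
	printf("finalized chunk %llu\n", (unsigned long long)chunk_counter);
}

static void
toy_update(toy_chunk_state_t *cs, const uint8_t *input, size_t input_len)
{
	/* Finish a previously started partial chunk before anything else. */
	if (cs->buf_len > 0) {
		size_t take = CHUNK_LEN - cs->buf_len;

		if (take > input_len)
			take = input_len;
		cs->buf_len += take;	/* real code also compresses blocks */
		input += take;
		input_len -= take;
		/*
		 * If the chunk filled up and more input follows, it cannot
		 * be the root chunk, so finalize it and move on.
		 */
		if (input_len > 0 && cs->buf_len == CHUNK_LEN) {
			push_cv(cs->chunk_counter);
			cs->chunk_counter++;
			cs->buf_len = 0;
		}
	}

	/* Consume whole chunks while more input remains after them. */
	while (input_len > CHUNK_LEN) {
		push_cv(cs->chunk_counter);
		cs->chunk_counter++;
		input += CHUNK_LEN;
		input_len -= CHUNK_LEN;
	}

	/* Whatever is left (up to one chunk) stays buffered. */
	cs->buf_len += input_len;
}

int
main(void)
{
	toy_chunk_state_t cs = { 0, 0 };
	uint8_t buf[3 * CHUNK_LEN + 100] = { 0 };

	toy_update(&cs, buf, sizeof (buf));	/* finalizes chunks 0, 1, 2 */
	return (0);
}
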
697 /* If the subtree stack is empty, then the current chunk is the root. */ in Blake3_FinalSeek()
699 output_t output = chunk_state_output(&ctx->chunk); in Blake3_FinalSeek()
704 * If there are any bytes in the chunk state, finalize that chunk and in Blake3_FinalSeek()
705 * do a roll-up merge between that chunk hash and every subtree in the in Blake3_FinalSeek()
709 * are no bytes in the chunk state, then the top of the stack is a in Blake3_FinalSeek()
710 * chunk hash, and we start the merge from that. in Blake3_FinalSeek()
714 if (chunk_state_len(&ctx->chunk) > 0) { in Blake3_FinalSeek()
716 output = chunk_state_output(&ctx->chunk); in Blake3_FinalSeek()
721 ctx->key, ctx->chunk.flags); in Blake3_FinalSeek()
729 ctx->chunk.flags); in Blake3_FinalSeek()
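
The Blake3_FinalSeek() hits above describe the final roll-up: if the subtree stack is empty, the current chunk is the root; otherwise the merge starts either from the finalized partial chunk or from a parent of the CVs at the top of the stack, and then folds in the remaining stack CVs from the top down, with only the last compression treated as the root. Below is a toy model of that ordering, with the same integer CVs and placeholder parent mix as above (illustrative only):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
toy_parent(uint64_t left, uint64_t right)
{
	return (left * 31 + right);	/* placeholder, not BLAKE3 */
}

/*
 * Fold the CV stack into a single output. chunk_has_bytes says whether the
 * in-progress chunk state holds any input; current_chunk_cv stands in for
 * its finalized chaining value. The real code builds 64-byte parent blocks
 * and derives the root bytes from the last parent with the ROOT flag set.
 */
static uint64_t
final_rollup(const uint64_t *cv_stack, size_t cv_stack_len,
    uint64_t current_chunk_cv, int chunk_has_bytes)
{
	uint64_t output;
	size_t remaining;

	if (chunk_has_bytes) {
		/* Start from the partial chunk, then fold in every stack CV. */
		output = current_chunk_cv;
		remaining = cv_stack_len;
	} else {
		/* No chunk bytes: the top two stack CVs form the first parent. */
		remaining = cv_stack_len - 2;
		output = toy_parent(cv_stack[remaining],
		    cv_stack[remaining + 1]);
	}
	while (remaining > 0) {
		remaining--;
		output = toy_parent(cv_stack[remaining], output);
	}
	return (output);
}

int
main(void)
{
	uint64_t stack[3] = { 1, 2, 3 };

	/* Partial chunk present: fold CVs 3, 2, 1 over it, top down. */
	printf("%llu\n",
	    (unsigned long long)final_rollup(stack, 3, 4, 1));
	return (0);
}
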