xref: /linux/lib/crypto/blake2b.c (revision 7fc2cd2e4b398c57c9cf961cfea05eadbf34c05c)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright 2025 Google LLC
 *
 * This is an implementation of the BLAKE2b hash and PRF functions.
 *
 * Information: https://blake2.net/
 */

#include <crypto/blake2b.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>

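/*
 * Message word schedule: blake2b_sigma[r] is the permutation of the 16
 * message words used in round r, per the BLAKE2 specification (RFC 7693).
 * BLAKE2b runs 12 rounds; rounds 10 and 11 repeat the permutations of
 * rounds 0 and 1.
 */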
static const u8 blake2b_sigma[12][16] = {
	{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
	{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
	{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
	{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
	{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
	{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
	{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
	{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
	{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
	{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
	{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
	{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
};

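/*
 * The 128-bit message byte counter is stored as two 64-bit words, t[0] (low)
 * and t[1] (high); the comparison below propagates the carry out of t[0].
 */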
static inline void blake2b_increment_counter(struct blake2b_ctx *ctx, u32 inc)
{
	ctx->t[0] += inc;
	ctx->t[1] += (ctx->t[0] < inc);
}

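/*
 * Generic (portable C) compression function.  Processes @nblocks 128-byte
 * blocks from @data, advancing the byte counter by @inc per block.  @inc is
 * BLAKE2B_BLOCK_SIZE for all calls except the final one from blake2b_final(),
 * which passes the length of the zero-padded last block.
 */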
static void __maybe_unused
blake2b_compress_generic(struct blake2b_ctx *ctx,
			 const u8 *data, size_t nblocks, u32 inc)
{
	u64 m[16];
	u64 v[16];
	int i;

	WARN_ON(IS_ENABLED(DEBUG) &&
		(nblocks > 1 && inc != BLAKE2B_BLOCK_SIZE));

	while (nblocks > 0) {
		blake2b_increment_counter(ctx, inc);
		memcpy(m, data, BLAKE2B_BLOCK_SIZE);
		le64_to_cpu_array(m, ARRAY_SIZE(m));
		memcpy(v, ctx->h, 64);
		v[ 8] = BLAKE2B_IV0;
		v[ 9] = BLAKE2B_IV1;
		v[10] = BLAKE2B_IV2;
		v[11] = BLAKE2B_IV3;
		v[12] = BLAKE2B_IV4 ^ ctx->t[0];
		v[13] = BLAKE2B_IV5 ^ ctx->t[1];
		v[14] = BLAKE2B_IV6 ^ ctx->f[0];
		v[15] = BLAKE2B_IV7 ^ ctx->f[1];

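/*
 * G is the BLAKE2b mixing function: each invocation folds two message words
 * into the four state words a, b, c, d using the rotation amounts 32, 24,
 * 16 and 63 defined for BLAKE2b.
 */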
#define G(r, i, a, b, c, d) do { \
	a += b + m[blake2b_sigma[r][2 * i + 0]]; \
	d = ror64(d ^ a, 32); \
	c += d; \
	b = ror64(b ^ c, 24); \
	a += b + m[blake2b_sigma[r][2 * i + 1]]; \
	d = ror64(d ^ a, 16); \
	c += d; \
	b = ror64(b ^ c, 63); \
} while (0)

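/*
 * One full round: the first four G calls mix the columns of the 4x4 state
 * matrix v, the last four mix its diagonals.
 */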
#define ROUND(r) do { \
	G(r, 0, v[0], v[ 4], v[ 8], v[12]); \
	G(r, 1, v[1], v[ 5], v[ 9], v[13]); \
	G(r, 2, v[2], v[ 6], v[10], v[14]); \
	G(r, 3, v[3], v[ 7], v[11], v[15]); \
	G(r, 4, v[0], v[ 5], v[10], v[15]); \
	G(r, 5, v[1], v[ 6], v[11], v[12]); \
	G(r, 6, v[2], v[ 7], v[ 8], v[13]); \
	G(r, 7, v[3], v[ 4], v[ 9], v[14]); \
} while (0)
		ROUND(0);
		ROUND(1);
		ROUND(2);
		ROUND(3);
		ROUND(4);
		ROUND(5);
		ROUND(6);
		ROUND(7);
		ROUND(8);
		ROUND(9);
		ROUND(10);
		ROUND(11);

#undef G
#undef ROUND

		for (i = 0; i < 8; ++i)
			ctx->h[i] ^= v[i] ^ v[i + 8];

		data += BLAKE2B_BLOCK_SIZE;
		--nblocks;
	}
}

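/*
 * If an architecture-specific implementation is enabled, it provides (or
 * selects) blake2b_compress() via $(SRCARCH)/blake2b.h; otherwise the generic
 * compression function above is used directly.
 */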
#ifdef CONFIG_CRYPTO_LIB_BLAKE2B_ARCH
#include "blake2b.h" /* $(SRCARCH)/blake2b.h */
#else
#define blake2b_compress blake2b_compress_generic
#endif

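/* Mark the next block passed to blake2b_compress() as the final one by setting f[0] to all-ones. */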
static inline void blake2b_set_lastblock(struct blake2b_ctx *ctx)
{
	ctx->f[0] = -1;
}

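/*
 * Absorb @inlen bytes from @in.  Data is buffered in ctx->buf and compressed
 * in full 128-byte blocks; the last block of the input (even if full) is
 * always kept in the buffer so that blake2b_final() can compress it with the
 * correct counter increment and last-block flag.
 */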
void blake2b_update(struct blake2b_ctx *ctx, const u8 *in, size_t inlen)
{
	const size_t fill = BLAKE2B_BLOCK_SIZE - ctx->buflen;

	if (unlikely(!inlen))
		return;
	if (inlen > fill) {
		memcpy(ctx->buf + ctx->buflen, in, fill);
		blake2b_compress(ctx, ctx->buf, 1, BLAKE2B_BLOCK_SIZE);
		ctx->buflen = 0;
		in += fill;
		inlen -= fill;
	}
	if (inlen > BLAKE2B_BLOCK_SIZE) {
		const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE);

		blake2b_compress(ctx, in, nblocks - 1, BLAKE2B_BLOCK_SIZE);
		in += BLAKE2B_BLOCK_SIZE * (nblocks - 1);
		inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1);
	}
	memcpy(ctx->buf + ctx->buflen, in, inlen);
	ctx->buflen += inlen;
}
EXPORT_SYMBOL(blake2b_update);

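/*
 * Finalize the hash: zero-pad the buffered final block, compress it with the
 * last-block flag set and the counter advanced by the actual number of
 * buffered bytes, then copy out the first ctx->outlen bytes of the
 * little-endian state and wipe the context.
 */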
void blake2b_final(struct blake2b_ctx *ctx, u8 *out)
{
	WARN_ON(IS_ENABLED(DEBUG) && !out);
	blake2b_set_lastblock(ctx);
	memset(ctx->buf + ctx->buflen, 0,
	       BLAKE2B_BLOCK_SIZE - ctx->buflen); /* Padding */
	blake2b_compress(ctx, ctx->buf, 1, ctx->buflen);
	cpu_to_le64_array(ctx->h, ARRAY_SIZE(ctx->h));
	memcpy(out, ctx->h, ctx->outlen);
	memzero_explicit(ctx, sizeof(*ctx));
}
EXPORT_SYMBOL(blake2b_final);

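/*
 * Typical one-shot usage (sketch only; assumes a blake2b_init() helper and
 * BLAKE2B_HASH_SIZE declared in <crypto/blake2b.h>, analogous to the BLAKE2s
 * library interface):
 *
 *	struct blake2b_ctx ctx;
 *	u8 digest[BLAKE2B_HASH_SIZE];
 *
 *	blake2b_init(&ctx, BLAKE2B_HASH_SIZE);
 *	blake2b_update(&ctx, data, data_len);
 *	blake2b_final(&ctx, digest);
 */
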
#ifdef blake2b_mod_init_arch
static int __init blake2b_mod_init(void)
{
	blake2b_mod_init_arch();
	return 0;
}
subsys_initcall(blake2b_mod_init);

static void __exit blake2b_mod_exit(void)
{
}
module_exit(blake2b_mod_exit);
#endif

MODULE_DESCRIPTION("BLAKE2b hash function");
MODULE_LICENSE("GPL");