/*
 * Copyright (C) 2021 - This file is part of libecc project
 *
 * Authors:
 *     Ryad BENADJILA <ryadbenadjila@gmail.com>
 *     Arnaud EBALARD <arnaud.ebalard@ssi.gouv.fr>
 *
 * This software is licensed under a dual BSD and GPL v2 license.
 * See LICENSE file at the root folder of the project.
 */
#include "md4.h"

/*
 * Inner MD4 round tables. For each CN_MD4 table: entries [0..3] are the
 * base message word indices for the four loop iterations, entries [4..7]
 * the per-step offsets added to them, entries [8..11] the left rotation
 * amounts, and entry [12] the additive round constant.
 */
static const u32 C1_MD4[13] = {
	0, 4, 8, 12, 0, 1, 2, 3, 3, 7, 11, 19, 0
};
static const u32 C2_MD4[13] = {
	0, 1, 2, 3, 0, 4, 8, 12, 3, 5, 9, 13, 0x5a827999
};
static const u32 C3_MD4[13] = {
	0, 2, 1, 3, 0, 8, 4, 12, 3, 9, 11, 15, 0x6ed9eba1
};

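/*
 * Boolean round functions from RFC 1320: F_MD4 is bitwise selection
 * (each bit of x chooses between the corresponding bits of y and z),
 * G_MD4 is a bitwise majority vote, and H_MD4 is bitwise parity (XOR).
 */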
#define F_MD4(x, y, z) (((x) & (y)) | ((~(x)) & (z)))
#define G_MD4(x, y, z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
#define H_MD4(x, y, z) ((x) ^ (y) ^ (z))

/* MD4 core block processing. Returns 0 on success, -1 on error. */
ATTRIBUTE_WARN_UNUSED_RET static inline int md4_process(md4_context *ctx,
							const u8 data[MD4_BLOCK_SIZE])
{
	u32 A, B, C, D;
	u32 W[16];
	u32 idx;
	int ret;
	unsigned int i;

	MUST_HAVE((data != NULL), ret, err);
	MD4_HASH_CHECK_INITIALIZED(ctx, ret, err);

	/* Init our inner variables */
	A = ctx->md4_state[0];
	B = ctx->md4_state[1];
	C = ctx->md4_state[2];
	D = ctx->md4_state[3];

	/* Load the sixteen 32-bit message words (little endian) */
	for (i = 0; i < 16; i++) {
		GET_UINT32_LE(W[i], data, (4 * i));
	}
	/* Proceed with the compression */
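	/* Round 1: sixteen F steps, word indices and rotations from C1_MD4 */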
	for (i = 0; i < 4; i++) {
		idx = (C1_MD4[i] + C1_MD4[4]);
		A = ROTL_MD4((A + F_MD4(B, C, D) + W[idx] + C1_MD4[12]), C1_MD4[8]);
		idx = (C1_MD4[i] + C1_MD4[5]);
		D = ROTL_MD4((D + F_MD4(A, B, C) + W[idx] + C1_MD4[12]), C1_MD4[9]);
		idx = (C1_MD4[i] + C1_MD4[6]);
		C = ROTL_MD4((C + F_MD4(D, A, B) + W[idx] + C1_MD4[12]), C1_MD4[10]);
		idx = (C1_MD4[i] + C1_MD4[7]);
		B = ROTL_MD4((B + F_MD4(C, D, A) + W[idx] + C1_MD4[12]), C1_MD4[11]);
	}
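	/* Round 2: sixteen G steps, constant 0x5a827999, indices/rotations from C2_MD4 */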
	for (i = 0; i < 4; i++) {
		idx = (C2_MD4[i] + C2_MD4[4]);
		A = ROTL_MD4((A + G_MD4(B, C, D) + W[idx] + C2_MD4[12]), C2_MD4[8]);
		idx = (C2_MD4[i] + C2_MD4[5]);
		D = ROTL_MD4((D + G_MD4(A, B, C) + W[idx] + C2_MD4[12]), C2_MD4[9]);
		idx = (C2_MD4[i] + C2_MD4[6]);
		C = ROTL_MD4((C + G_MD4(D, A, B) + W[idx] + C2_MD4[12]), C2_MD4[10]);
		idx = (C2_MD4[i] + C2_MD4[7]);
		B = ROTL_MD4((B + G_MD4(C, D, A) + W[idx] + C2_MD4[12]), C2_MD4[11]);
	}
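	/* Round 3: sixteen H steps, constant 0x6ed9eba1, indices/rotations from C3_MD4 */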
	for (i = 0; i < 4; i++) {
		idx = (C3_MD4[i] + C3_MD4[4]);
		A = ROTL_MD4((A + H_MD4(B, C, D) + W[idx] + C3_MD4[12]), C3_MD4[8]);
		idx = (C3_MD4[i] + C3_MD4[5]);
		D = ROTL_MD4((D + H_MD4(A, B, C) + W[idx] + C3_MD4[12]), C3_MD4[9]);
		idx = (C3_MD4[i] + C3_MD4[6]);
		C = ROTL_MD4((C + H_MD4(D, A, B) + W[idx] + C3_MD4[12]), C3_MD4[10]);
		idx = (C3_MD4[i] + C3_MD4[7]);
		B = ROTL_MD4((B + H_MD4(C, D, A) + W[idx] + C3_MD4[12]), C3_MD4[11]);
	}

	/* Update state */
	ctx->md4_state[0] += A;
	ctx->md4_state[1] += B;
	ctx->md4_state[2] += C;
	ctx->md4_state[3] += D;

	ret = 0;

err:
	return ret;
}

/* Init hash function. Returns 0 on success, -1 on error. */
ATTRIBUTE_WARN_UNUSED_RET int md4_init(md4_context *ctx)
{
	int ret;

	MUST_HAVE((ctx != NULL), ret, err);

	/* Sanity check on size */
	MUST_HAVE((MD4_DIGEST_SIZE <= MAX_DIGEST_SIZE), ret, err);

	ctx->md4_total = 0;
	ctx->md4_state[0] = 0x67452301;
	ctx->md4_state[1] = 0xEFCDAB89;
	ctx->md4_state[2] = 0x98BADCFE;
	ctx->md4_state[3] = 0x10325476;

	/* Mark the context as initialized */
	ctx->magic = MD4_HASH_MAGIC;

	ret = 0;

err:
	return ret;
}

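/* Update hash function with 'ilen' bytes of 'input'. Returns 0 on success, -1 on error. */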
ATTRIBUTE_WARN_UNUSED_RET int md4_update(md4_context *ctx, const u8 *input, u32 ilen)
{
	const u8 *data_ptr = input;
	u32 remain_ilen = ilen;
	u16 fill;
	u8 left;
	int ret;

	MUST_HAVE((input != NULL) || (ilen == 0), ret, err);
	MD4_HASH_CHECK_INITIALIZED(ctx, ret, err);

	/* Nothing to process, return */
	if (ilen == 0) {
		ret = 0;
		goto err;
	}

	/* Get what's left in our local buffer */
	left = (ctx->md4_total & 0x3F);
	fill = (u16)(MD4_BLOCK_SIZE - left);

	ctx->md4_total += ilen;

	if ((left > 0) && (remain_ilen >= fill)) {
		/* Copy data at the end of the buffer */
		ret = local_memcpy(ctx->md4_buffer + left, data_ptr, fill); EG(ret, err);
		ret = md4_process(ctx, ctx->md4_buffer); EG(ret, err);
		data_ptr += fill;
		remain_ilen -= fill;
		left = 0;
	}

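	/* Process contiguous full blocks directly from the input */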
	while (remain_ilen >= MD4_BLOCK_SIZE) {
		ret = md4_process(ctx, data_ptr); EG(ret, err);
		data_ptr += MD4_BLOCK_SIZE;
		remain_ilen -= MD4_BLOCK_SIZE;
	}

	/* Buffer any remaining bytes for the next call */
	if (remain_ilen > 0) {
		ret = local_memcpy(ctx->md4_buffer + left, data_ptr, remain_ilen); EG(ret, err);
	}

	ret = 0;

err:
	return ret;
}

/* Finalize hash computation. Returns 0 on success, -1 on error. */
ATTRIBUTE_WARN_UNUSED_RET int md4_final(md4_context *ctx, u8 output[MD4_DIGEST_SIZE])
{
	unsigned int block_present = 0;
	u8 last_padded_block[2 * MD4_BLOCK_SIZE];
	int ret;

	MUST_HAVE((output != NULL), ret, err);
	MD4_HASH_CHECK_INITIALIZED(ctx, ret, err);

	/* Fill in our last block with zeroes */
	ret = local_memset(last_padded_block, 0, sizeof(last_padded_block)); EG(ret, err);

	/* This is our final step, so we proceed with the padding */
	block_present = ctx->md4_total % MD4_BLOCK_SIZE;
	if (block_present != 0) {
		/* Copy what's left in our temporary context buffer */
		ret = local_memcpy(last_padded_block, ctx->md4_buffer,
				   block_present); EG(ret, err);
	}

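	/*
	 * MD4 padding appends a 0x80 byte, zeroes, then the message bit
	 * length as a 64-bit little endian integer. If fewer than 9 bytes
	 * are free in the current block, the padding spills into a second
	 * block.
	 */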
	/* Put the 0x80 byte, beginning of padding */
	last_padded_block[block_present] = 0x80;

	/* Handle possible additional block */
	if (block_present > (MD4_BLOCK_SIZE - 1 - sizeof(u64))) {
		/* We need an additional block */
		PUT_UINT64_LE(8 * ctx->md4_total, last_padded_block,
			      (2 * MD4_BLOCK_SIZE) - sizeof(u64));
		ret = md4_process(ctx, last_padded_block); EG(ret, err);
		ret = md4_process(ctx, last_padded_block + MD4_BLOCK_SIZE); EG(ret, err);
	} else {
		/* We do not need an additional block */
		PUT_UINT64_LE(8 * ctx->md4_total, last_padded_block,
			      MD4_BLOCK_SIZE - sizeof(u64));
		ret = md4_process(ctx, last_padded_block); EG(ret, err);
	}

	/* Output the hash result */
	PUT_UINT32_LE(ctx->md4_state[0], output, 0);
	PUT_UINT32_LE(ctx->md4_state[1], output, 4);
	PUT_UINT32_LE(ctx->md4_state[2], output, 8);
	PUT_UINT32_LE(ctx->md4_state[3], output, 12);

	/* Mark the context as uninitialized */
	ctx->magic = WORD(0);

	ret = 0;

err:
	return ret;
}

/*
 * Scattered version performing init/update/finalize on a vector of buffers
 * 'inputs', with the length of each buffer passed via 'ilens'. The function
 * loops on the pointers in 'inputs' until it finds a NULL pointer, and
 * returns 0 on success, -1 on error.
 */
ATTRIBUTE_WARN_UNUSED_RET int md4_scattered(const u8 **inputs, const u32 *ilens,
					    u8 output[MD4_DIGEST_SIZE])
{
	md4_context ctx;
	int ret, pos = 0;

	MUST_HAVE((inputs != NULL) && (ilens != NULL) && (output != NULL), ret, err);

	ret = md4_init(&ctx); EG(ret, err);

	while (inputs[pos] != NULL) {
		ret = md4_update(&ctx, inputs[pos], ilens[pos]); EG(ret, err);
		pos += 1;
	}

	ret = md4_final(&ctx, output);

err:
	return ret;
}

/*
 * Single call version performing init/update/final on given input.
 * Returns 0 on success, -1 on error.
 */
ATTRIBUTE_WARN_UNUSED_RET int md4(const u8 *input, u32 ilen, u8 output[MD4_DIGEST_SIZE])
{
	md4_context ctx;
	int ret;

	ret = md4_init(&ctx); EG(ret, err);
	ret = md4_update(&ctx, input, ilen); EG(ret, err);
	ret = md4_final(&ctx, output);

err:
	return ret;
}
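
/*
 * Example usage sketch (illustration only, not part of the library API):
 *
 *	u8 digest[MD4_DIGEST_SIZE];
 *	const u8 msg[] = "abc";
 *
 *	if (md4(msg, sizeof(msg) - 1, digest)) {
 *		... handle error ...
 *	}
 *
 * For "abc", 'digest' then holds the RFC 1320 test vector
 * a448017aaf21d8525fc10ae87aa6729d.
 */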