/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Test cases for hash functions, including a benchmark. This is included by
 * KUnit test suites that want to use it. See sha512_kunit.c for an example.
 *
 * Copyright 2025 Google LLC
 */
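
/*
 * Usage sketch (illustrative only; see sha512_kunit.c and the other suites for
 * the real definitions): before including this file, a suite is expected to
 * map the generic names used below onto a specific hash function, e.g.:
 *
 *	#define HASH_CTX		sha512_ctx
 *	#define HASH_SIZE		SHA512_DIGEST_SIZE
 *	#define HASH(data, len, out)	sha512((data), (len), (out))
 *	#define HASH_INIT		sha512_init
 *	#define HASH_UPDATE		sha512_update
 *	#define HASH_FINAL		sha512_final
 *
 * along with hash_testvecs[], hash_testvec_consolidated[], and (for keyed
 * suites) the corresponding HMAC_* macros and hmac_testvec_consolidated[].
 * The sha512_* names on the right-hand side are just an assumed example.
 */
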
#include <kunit/test.h>
#include <linux/hrtimer.h>
#include <linux/timekeeping.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

/* test_buf is a guarded buffer, i.e. &test_buf[TEST_BUF_LEN] is not mapped. */
#define TEST_BUF_LEN 16384
static u8 *test_buf;

static u8 *orig_test_buf;

static u64 random_seed;

/*
 * This is a simple linear congruential generator. It is used only for testing,
 * which does not require cryptographically secure random numbers. A hard-coded
 * algorithm is used instead of <linux/prandom.h> so that it matches the
 * algorithm used by the test vector generation script. This allows the input
 * data in random test vectors to be concisely stored as just the seed.
 */
static u32 rand32(void)
{
	random_seed = (random_seed * 25214903917 + 11) & ((1ULL << 48) - 1);
	return random_seed >> 16;
}

static void rand_bytes(u8 *out, size_t len)
{
	for (size_t i = 0; i < len; i++)
		out[i] = rand32();
}

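/*
 * Fill @out with @len seeded "random" bytes, using @len itself as the seed.
 * This matches the test vector generation script, so a test vector's input
 * data can be regenerated here from just its length.
 */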
static void rand_bytes_seeded_from_len(u8 *out, size_t len)
{
	random_seed = len;
	rand_bytes(out, len);
}

static bool rand_bool(void)
{
	return rand32() % 2;
}

/* Generate a random length, preferring small lengths. */
static size_t rand_length(size_t max_len)
{
	size_t len;

	switch (rand32() % 3) {
	case 0:
		len = rand32() % 128;
		break;
	case 1:
		len = rand32() % 3072;
		break;
	default:
		len = rand32();
		break;
	}
	return len % (max_len + 1);
}

static size_t rand_offset(size_t max_offset)
{
	return min(rand32() % 128, max_offset);
}

static int hash_suite_init(struct kunit_suite *suite)
{
	/*
	 * Allocate the test buffer using vmalloc() with a page-aligned length
	 * so that it is immediately followed by a guard page. This allows
	 * buffer overreads to be detected, even in assembly code.
	 */
	size_t alloc_len = round_up(TEST_BUF_LEN, PAGE_SIZE);

	orig_test_buf = vmalloc(alloc_len);
	if (!orig_test_buf)
		return -ENOMEM;

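	/*
	 * Point test_buf at the last TEST_BUF_LEN bytes of the allocation, so
	 * that &test_buf[TEST_BUF_LEN] abuts vmalloc()'s guard page even when
	 * alloc_len was rounded up past TEST_BUF_LEN (i.e. when TEST_BUF_LEN
	 * is not a multiple of PAGE_SIZE).
	 */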
	test_buf = orig_test_buf + alloc_len - TEST_BUF_LEN;
	return 0;
}

static void hash_suite_exit(struct kunit_suite *suite)
{
	vfree(orig_test_buf);
	orig_test_buf = NULL;
	test_buf = NULL;
}

/*
 * Test the hash function against a list of test vectors.
 *
 * Note that it's only necessary to run each test vector in one way (e.g.,
 * one-shot instead of incremental), since consistency between different ways of
 * using the APIs is verified by other test cases.
 */
static void test_hash_test_vectors(struct kunit *test)
{
	for (size_t i = 0; i < ARRAY_SIZE(hash_testvecs); i++) {
		size_t data_len = hash_testvecs[i].data_len;
		u8 actual_hash[HASH_SIZE];

		KUNIT_ASSERT_LE(test, data_len, TEST_BUF_LEN);
		rand_bytes_seeded_from_len(test_buf, data_len);

		HASH(test_buf, data_len, actual_hash);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, actual_hash, hash_testvecs[i].digest, HASH_SIZE,
			"Wrong result with test vector %zu; data_len=%zu", i,
			data_len);
	}
}
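
/*
 * For reference, the hash_testvecs[] array consumed above is expected to look
 * roughly like the following (a sketch only; the real array and its element
 * type come from the including suite / the test vector generation script):
 *
 *	static const struct {
 *		size_t data_len;
 *		u8 digest[HASH_SIZE];
 *	} hash_testvecs[] = {
 *		{ .data_len = 0, .digest = { ... } },
 *		{ .data_len = 1, .digest = { ... } },
 *		...
 *	};
 *
 * where each entry's input data is regenerated by
 * rand_bytes_seeded_from_len(test_buf, data_len).
 */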

/*
 * Test that the hash function produces correct results for *every* length up to
 * 4096 bytes. To do this, generate seeded random data, then calculate a hash
 * value for each length 0..4096, then hash the hash values. Verify just the
 * final hash value, which should match only when all hash values were correct.
 */
static void test_hash_all_lens_up_to_4096(struct kunit *test)
{
	struct HASH_CTX ctx;
	u8 hash[HASH_SIZE];

	static_assert(TEST_BUF_LEN >= 4096);
	rand_bytes_seeded_from_len(test_buf, 4096);
	HASH_INIT(&ctx);
	for (size_t len = 0; len <= 4096; len++) {
		HASH(test_buf, len, hash);
		HASH_UPDATE(&ctx, hash, HASH_SIZE);
	}
	HASH_FINAL(&ctx, hash);
	KUNIT_ASSERT_MEMEQ(test, hash, hash_testvec_consolidated, HASH_SIZE);
}

/*
 * Test that the hash function produces the same result with a one-shot
 * computation as it does with an incremental computation.
 */
static void test_hash_incremental_updates(struct kunit *test)
{
	for (int i = 0; i < 1000; i++) {
		size_t total_len, offset;
		struct HASH_CTX ctx;
		u8 hash1[HASH_SIZE];
		u8 hash2[HASH_SIZE];
		size_t num_parts = 0;
		size_t remaining_len, cur_offset;

		total_len = rand_length(TEST_BUF_LEN);
		offset = rand_offset(TEST_BUF_LEN - total_len);
		rand_bytes(&test_buf[offset], total_len);

		/* Compute the hash value in one shot. */
		HASH(&test_buf[offset], total_len, hash1);

		/*
		 * Compute the hash value incrementally, using a randomly
		 * selected sequence of update lengths that sum to total_len.
		 */
		HASH_INIT(&ctx);
		remaining_len = total_len;
		cur_offset = offset;
		while (rand_bool()) {
			size_t part_len = rand_length(remaining_len);

			HASH_UPDATE(&ctx, &test_buf[cur_offset], part_len);
			num_parts++;
			cur_offset += part_len;
			remaining_len -= part_len;
		}
		if (remaining_len != 0 || rand_bool()) {
			HASH_UPDATE(&ctx, &test_buf[cur_offset], remaining_len);
			num_parts++;
		}
		HASH_FINAL(&ctx, hash2);

		/* Verify that the two hash values are the same. */
		KUNIT_ASSERT_MEMEQ_MSG(
			test, hash1, hash2, HASH_SIZE,
			"Incremental test failed with total_len=%zu num_parts=%zu offset=%zu",
			total_len, num_parts, offset);
	}
}

/*
 * Test that the hash function does not overrun any buffers. Uses a guard page
 * to catch buffer overruns even if they occur in assembly code.
 */
static void test_hash_buffer_overruns(struct kunit *test)
{
	const size_t max_tested_len = TEST_BUF_LEN - sizeof(struct HASH_CTX);
	void *const buf_end = &test_buf[TEST_BUF_LEN];
	struct HASH_CTX *guarded_ctx = buf_end - sizeof(*guarded_ctx);

	rand_bytes(test_buf, TEST_BUF_LEN);

	for (int i = 0; i < 100; i++) {
		size_t len = rand_length(max_tested_len);
		struct HASH_CTX ctx;
		u8 hash[HASH_SIZE];

		/* Check for overruns of the data buffer. */
		HASH(buf_end - len, len, hash);
		HASH_INIT(&ctx);
		HASH_UPDATE(&ctx, buf_end - len, len);
		HASH_FINAL(&ctx, hash);

		/* Check for overruns of the hash value buffer. */
		HASH(test_buf, len, buf_end - HASH_SIZE);
		HASH_INIT(&ctx);
		HASH_UPDATE(&ctx, test_buf, len);
		HASH_FINAL(&ctx, buf_end - HASH_SIZE);

		/* Check for overruns of the hash context. */
		HASH_INIT(guarded_ctx);
		HASH_UPDATE(guarded_ctx, test_buf, len);
		HASH_FINAL(guarded_ctx, hash);
	}
}

/*
 * Test that the caller is permitted to alias the output digest and source data
 * buffer, and also modify the source data buffer after it has been used.
 */
static void test_hash_overlaps(struct kunit *test)
{
	const size_t max_tested_len = TEST_BUF_LEN - HASH_SIZE;
	struct HASH_CTX ctx;
	u8 hash[HASH_SIZE];

	rand_bytes(test_buf, TEST_BUF_LEN);

	for (int i = 0; i < 100; i++) {
		size_t len = rand_length(max_tested_len);
		size_t offset = HASH_SIZE + rand_offset(max_tested_len - len);
		bool left_end = rand_bool();
		u8 *ovl_hash = left_end ? &test_buf[offset] :
					  &test_buf[offset + len - HASH_SIZE];

		HASH(&test_buf[offset], len, hash);
		HASH(&test_buf[offset], len, ovl_hash);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, hash, ovl_hash, HASH_SIZE,
			"Overlap test 1 failed with len=%zu offset=%zu left_end=%d",
			len, offset, left_end);

		/* Repeat the above test, but this time use init+update+final */
		HASH(&test_buf[offset], len, hash);
		HASH_INIT(&ctx);
		HASH_UPDATE(&ctx, &test_buf[offset], len);
		HASH_FINAL(&ctx, ovl_hash);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, hash, ovl_hash, HASH_SIZE,
			"Overlap test 2 failed with len=%zu offset=%zu left_end=%d",
			len, offset, left_end);

		/* Test modifying the source data after it was used. */
		HASH(&test_buf[offset], len, hash);
		HASH_INIT(&ctx);
		HASH_UPDATE(&ctx, &test_buf[offset], len);
		rand_bytes(&test_buf[offset], len);
		HASH_FINAL(&ctx, ovl_hash);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, hash, ovl_hash, HASH_SIZE,
			"Overlap test 3 failed with len=%zu offset=%zu left_end=%d",
			len, offset, left_end);
	}
}

/*
 * Test that if the same data is hashed at different alignments in memory, the
 * results are the same.
 */
static void test_hash_alignment_consistency(struct kunit *test)
{
	u8 hash1[128 + HASH_SIZE];
	u8 hash2[128 + HASH_SIZE];

	for (int i = 0; i < 100; i++) {
		size_t len = rand_length(TEST_BUF_LEN);
		size_t data_offs1 = rand_offset(TEST_BUF_LEN - len);
		size_t data_offs2 = rand_offset(TEST_BUF_LEN - len);
		size_t hash_offs1 = rand_offset(128);
		size_t hash_offs2 = rand_offset(128);

		rand_bytes(&test_buf[data_offs1], len);
		HASH(&test_buf[data_offs1], len, &hash1[hash_offs1]);
		memmove(&test_buf[data_offs2], &test_buf[data_offs1], len);
		HASH(&test_buf[data_offs2], len, &hash2[hash_offs2]);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, &hash1[hash_offs1], &hash2[hash_offs2], HASH_SIZE,
			"Alignment consistency test failed with len=%zu data_offs=(%zu,%zu) hash_offs=(%zu,%zu)",
			len, data_offs1, data_offs2, hash_offs1, hash_offs2);
	}
}

/* Test that HASH_FINAL zeroizes the context. */
static void test_hash_ctx_zeroization(struct kunit *test)
{
	static const u8 zeroes[sizeof(struct HASH_CTX)];
	struct HASH_CTX ctx;

	rand_bytes(test_buf, 128);
	HASH_INIT(&ctx);
	HASH_UPDATE(&ctx, test_buf, 128);
	HASH_FINAL(&ctx, test_buf);
	KUNIT_ASSERT_MEMEQ_MSG(test, &ctx, zeroes, sizeof(ctx),
			       "Hash context was not zeroized by finalization");
}

#define IRQ_TEST_HRTIMER_INTERVAL us_to_ktime(5)

struct hash_irq_test_state {
	bool (*func)(void *test_specific_state);
	void *test_specific_state;
	bool task_func_reported_failure;
	bool hardirq_func_reported_failure;
	bool softirq_func_reported_failure;
	unsigned long hardirq_func_calls;
	unsigned long softirq_func_calls;
	struct hrtimer timer;
	struct work_struct bh_work;
};

static enum hrtimer_restart hash_irq_test_timer_func(struct hrtimer *timer)
{
	struct hash_irq_test_state *state =
		container_of(timer, typeof(*state), timer);

	WARN_ON_ONCE(!in_hardirq());
	state->hardirq_func_calls++;

	if (!state->func(state->test_specific_state))
		state->hardirq_func_reported_failure = true;

	hrtimer_forward_now(&state->timer, IRQ_TEST_HRTIMER_INTERVAL);
	queue_work(system_bh_wq, &state->bh_work);
	return HRTIMER_RESTART;
}

static void hash_irq_test_bh_work_func(struct work_struct *work)
{
	struct hash_irq_test_state *state =
		container_of(work, typeof(*state), bh_work);

	WARN_ON_ONCE(!in_serving_softirq());
	state->softirq_func_calls++;

	if (!state->func(state->test_specific_state))
		state->softirq_func_reported_failure = true;
}

/*
 * Helper function that repeatedly runs the given @func in task, softirq, and
 * hardirq context concurrently, and reports a failure to KUnit if any
 * invocation of @func in any context returns false. @func is passed
 * @test_specific_state as its argument. At most 3 invocations of @func will
 * run concurrently: one in each of task, softirq, and hardirq context.
 *
 * The main purpose of this interrupt context testing is to validate fallback
 * code paths that run in contexts where the normal code path cannot be used,
 * typically due to the FPU or vector registers already being in use in kernel
 * mode. These code paths aren't covered when the test code is executed only by
 * the KUnit test runner thread in task context. Concurrency is needed because
 * merely using hardirq context is not sufficient to reach a fallback code path
 * on some architectures; the hardirq actually has to occur while the FPU or
 * vector unit is already in use in kernel mode.
 *
 * Another purpose of this testing is to detect issues with the architecture's
 * irq_fpu_usable() and kernel_fpu_begin/end() or equivalent functions,
 * especially in softirq context when the softirq may have interrupted a task
 * already using kernel-mode FPU or vector (if the arch didn't prevent that).
 * Crypto functions are often executed in softirqs, so this is important.
 */
static void run_irq_test(struct kunit *test, bool (*func)(void *),
			 int max_iterations, void *test_specific_state)
{
	struct hash_irq_test_state state = {
		.func = func,
		.test_specific_state = test_specific_state,
	};
	unsigned long end_jiffies;

	/*
	 * Set up a hrtimer (the way we access hardirq context) and a work
	 * struct for the BH workqueue (the way we access softirq context).
	 */
	hrtimer_setup_on_stack(&state.timer, hash_irq_test_timer_func,
			       CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	INIT_WORK_ONSTACK(&state.bh_work, hash_irq_test_bh_work_func);

	/* Run for up to max_iterations or 1 second, whichever comes first. */
	end_jiffies = jiffies + HZ;
	hrtimer_start(&state.timer, IRQ_TEST_HRTIMER_INTERVAL,
		      HRTIMER_MODE_REL_HARD);
	for (int i = 0; i < max_iterations && !time_after(jiffies, end_jiffies);
	     i++) {
		if (!func(test_specific_state))
			state.task_func_reported_failure = true;
	}

	/* Cancel the timer and work. */
	hrtimer_cancel(&state.timer);
	flush_work(&state.bh_work);

	/* Sanity check: the timer and BH functions should have been run. */
	KUNIT_EXPECT_GT_MSG(test, state.hardirq_func_calls, 0,
			    "Timer function was not called");
	KUNIT_EXPECT_GT_MSG(test, state.softirq_func_calls, 0,
			    "BH work function was not called");

	/* Check for incorrect hash values reported from any context. */
	KUNIT_EXPECT_FALSE_MSG(
		test, state.task_func_reported_failure,
		"Incorrect hash values reported from task context");
	KUNIT_EXPECT_FALSE_MSG(
		test, state.hardirq_func_reported_failure,
		"Incorrect hash values reported from hardirq context");
	KUNIT_EXPECT_FALSE_MSG(
		test, state.softirq_func_reported_failure,
		"Incorrect hash values reported from softirq context");
}

#define IRQ_TEST_DATA_LEN 256
#define IRQ_TEST_NUM_BUFFERS 3 /* matches max concurrency level */

struct hash_irq_test1_state {
	u8 expected_hashes[IRQ_TEST_NUM_BUFFERS][HASH_SIZE];
	atomic_t seqno;
};

/*
 * Compute the hash of one of the test messages and verify that it matches the
 * expected hash from @state->expected_hashes. To increase the chance of
 * detecting problems, cycle through multiple messages.
 */
static bool hash_irq_test1_func(void *state_)
{
	struct hash_irq_test1_state *state = state_;
	u32 i = (u32)atomic_inc_return(&state->seqno) % IRQ_TEST_NUM_BUFFERS;
	u8 actual_hash[HASH_SIZE];

	HASH(&test_buf[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN, actual_hash);
	return memcmp(actual_hash, state->expected_hashes[i], HASH_SIZE) == 0;
}

/*
 * Test that if hashes are computed in task, softirq, and hardirq context
 * concurrently, then all results are as expected.
 */
static void test_hash_interrupt_context_1(struct kunit *test)
{
	struct hash_irq_test1_state state = {};

	/* Prepare some test messages and compute the expected hash of each. */
	rand_bytes(test_buf, IRQ_TEST_NUM_BUFFERS * IRQ_TEST_DATA_LEN);
	for (int i = 0; i < IRQ_TEST_NUM_BUFFERS; i++)
		HASH(&test_buf[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN,
		     state.expected_hashes[i]);

	run_irq_test(test, hash_irq_test1_func, 100000, &state);
}

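/*
 * Per-context state for test 2. Each of the (up to 3) concurrent invocations
 * of hash_irq_test2_func() claims one of these via @in_use and advances its
 * @step: step 0 does HASH_INIT, the middle steps do one HASH_UPDATE each at
 * @offset, and the last step does HASH_FINAL and checks the result.
 */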
struct hash_irq_test2_hash_ctx {
	struct HASH_CTX hash_ctx;
	atomic_t in_use;
	int offset;
	int step;
};

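/*
 * Shared state for test 2. @num_steps counts the init step, one update step
 * per entry of @update_lens[], and the final step.
 */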
struct hash_irq_test2_state {
	struct hash_irq_test2_hash_ctx ctxs[IRQ_TEST_NUM_BUFFERS];
	u8 expected_hash[HASH_SIZE];
	u16 update_lens[32];
	int num_steps;
};

static bool hash_irq_test2_func(void *state_)
{
	struct hash_irq_test2_state *state = state_;
	struct hash_irq_test2_hash_ctx *ctx;
	bool ret = true;

	for (ctx = &state->ctxs[0]; ctx < &state->ctxs[ARRAY_SIZE(state->ctxs)];
	     ctx++) {
		if (atomic_cmpxchg(&ctx->in_use, 0, 1) == 0)
			break;
	}
	if (WARN_ON_ONCE(ctx == &state->ctxs[ARRAY_SIZE(state->ctxs)])) {
		/*
		 * This should never happen, as the number of contexts is equal
		 * to the maximum concurrency level of run_irq_test().
		 */
		return false;
	}

	if (ctx->step == 0) {
		/* Init step */
		HASH_INIT(&ctx->hash_ctx);
		ctx->offset = 0;
		ctx->step++;
	} else if (ctx->step < state->num_steps - 1) {
		/* Update step */
		HASH_UPDATE(&ctx->hash_ctx, &test_buf[ctx->offset],
			    state->update_lens[ctx->step - 1]);
		ctx->offset += state->update_lens[ctx->step - 1];
		ctx->step++;
	} else {
		/* Final step */
		u8 actual_hash[HASH_SIZE];

		if (WARN_ON_ONCE(ctx->offset != TEST_BUF_LEN))
			ret = false;
		HASH_FINAL(&ctx->hash_ctx, actual_hash);
		if (memcmp(actual_hash, state->expected_hash, HASH_SIZE) != 0)
			ret = false;
		ctx->step = 0;
	}
	atomic_set_release(&ctx->in_use, 0);
	return ret;
}

/*
 * Test that if hashes are computed in task, softirq, and hardirq context
 * concurrently, *including doing different parts of the same incremental
 * computation in different contexts*, then all results are as expected.
 * Besides detecting bugs similar to those that test_hash_interrupt_context_1
 * can detect, this test case can also detect bugs where hash function
 * implementations don't correctly handle these mixed incremental computations.
 */
static void test_hash_interrupt_context_2(struct kunit *test)
{
	struct hash_irq_test2_state *state;
	int remaining = TEST_BUF_LEN;

	state = kunit_kzalloc(test, sizeof(*state), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, state);

	rand_bytes(test_buf, TEST_BUF_LEN);
	HASH(test_buf, TEST_BUF_LEN, state->expected_hash);

	/*
	 * Generate a list of update lengths to use. Ensure that it contains
	 * multiple entries but does not overflow the update_lens array.
	 */
	static_assert(TEST_BUF_LEN / 4096 > 1);
	for (state->num_steps = 0;
	     state->num_steps < ARRAY_SIZE(state->update_lens) - 1 && remaining;
	     state->num_steps++) {
		state->update_lens[state->num_steps] =
			rand_length(min(remaining, 4096));
		remaining -= state->update_lens[state->num_steps];
	}
	if (remaining)
		state->update_lens[state->num_steps++] = remaining;
	state->num_steps += 2; /* for init and final */

	run_irq_test(test, hash_irq_test2_func, 250000, state);
}

#define UNKEYED_HASH_KUNIT_CASES				\
	KUNIT_CASE(test_hash_test_vectors),			\
	KUNIT_CASE(test_hash_all_lens_up_to_4096),		\
	KUNIT_CASE(test_hash_incremental_updates),		\
	KUNIT_CASE(test_hash_buffer_overruns),			\
	KUNIT_CASE(test_hash_overlaps),				\
	KUNIT_CASE(test_hash_alignment_consistency),		\
	KUNIT_CASE(test_hash_ctx_zeroization),			\
	KUNIT_CASE(test_hash_interrupt_context_1),		\
	KUNIT_CASE(test_hash_interrupt_context_2)
/* benchmark_hash is omitted so that the suites can put it last. */

#ifdef HMAC
/*
 * Test the corresponding HMAC variant.
 *
 * This test case is fairly short, since HMAC is just a simple C wrapper around
 * the underlying unkeyed hash function, which is already well-tested by the
 * other test cases. It's not useful to test things like data alignment or
 * interrupt context again for HMAC, nor to have a long list of test vectors.
 *
 * Thus, just do a single consolidated test, which covers all data lengths up to
 * 4096 bytes and all key lengths up to 292 bytes. For each data length, select
 * a key length, generate the inputs from a seed, and compute the HMAC value.
 * Concatenate all these HMAC values together, and compute the HMAC of that.
 * Verify that value. If this fails, then the HMAC implementation is wrong.
 * This won't show which specific input failed, but that should be fine. Any
 * failure would likely be non-input-specific or also show in the unkeyed tests.
 */
static void test_hmac(struct kunit *test)
{
	static const u8 zeroes[sizeof(struct HMAC_CTX)];
	u8 *raw_key;
	struct HMAC_KEY key;
	struct HMAC_CTX ctx;
	u8 mac[HASH_SIZE];
	u8 mac2[HASH_SIZE];

	static_assert(TEST_BUF_LEN >= 4096 + 293);
	rand_bytes_seeded_from_len(test_buf, 4096);
	raw_key = &test_buf[4096];

	rand_bytes_seeded_from_len(raw_key, 32);
	HMAC_PREPAREKEY(&key, raw_key, 32);
	HMAC_INIT(&ctx, &key);
	for (size_t data_len = 0; data_len <= 4096; data_len++) {
		/*
		 * Cycle through key lengths as well. The modulus 293 was
		 * chosen somewhat arbitrarily: it is somewhat larger than the
		 * largest hash block size (the size at which the key starts
		 * being hashed down to one block), so going higher would not
		 * be useful, and it is prime to reduce correlation with
		 * data_len.
		 */
		size_t key_len = data_len % 293;

		HMAC_UPDATE(&ctx, test_buf, data_len);

		rand_bytes_seeded_from_len(raw_key, key_len);
		HMAC_USINGRAWKEY(raw_key, key_len, test_buf, data_len, mac);
		HMAC_UPDATE(&ctx, mac, HASH_SIZE);

		/* Verify that HMAC() is consistent with HMAC_USINGRAWKEY(). */
		HMAC_PREPAREKEY(&key, raw_key, key_len);
		HMAC(&key, test_buf, data_len, mac2);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, mac, mac2, HASH_SIZE,
			"HMAC gave different results with raw and prepared keys");
	}
	HMAC_FINAL(&ctx, mac);
	KUNIT_EXPECT_MEMEQ_MSG(test, mac, hmac_testvec_consolidated, HASH_SIZE,
			       "HMAC gave wrong result");
	KUNIT_EXPECT_MEMEQ_MSG(test, &ctx, zeroes, sizeof(ctx),
			       "HMAC context was not zeroized by finalization");
}
#define HASH_KUNIT_CASES UNKEYED_HASH_KUNIT_CASES, KUNIT_CASE(test_hmac)
#else
#define HASH_KUNIT_CASES UNKEYED_HASH_KUNIT_CASES
#endif

/* Benchmark the hash function on various data lengths. */
static void benchmark_hash(struct kunit *test)
{
	static const size_t lens_to_test[] = {
		1, 16, 64, 127, 128, 200, 256,
		511, 512, 1024, 3173, 4096, 16384,
	};
	u8 hash[HASH_SIZE];

	if (!IS_ENABLED(CONFIG_CRYPTO_LIB_BENCHMARK))
		kunit_skip(test, "not enabled");

	/* Warm-up */
	for (size_t i = 0; i < 10000000; i += TEST_BUF_LEN)
		HASH(test_buf, TEST_BUF_LEN, hash);

	for (size_t i = 0; i < ARRAY_SIZE(lens_to_test); i++) {
		size_t len = lens_to_test[i];
		/* The '+ 128' tries to account for per-message overhead. */
		size_t num_iters = 10000000 / (len + 128);
		u64 t;

		KUNIT_ASSERT_LE(test, len, TEST_BUF_LEN);
		preempt_disable();
		t = ktime_get_ns();
		for (size_t j = 0; j < num_iters; j++)
			HASH(test_buf, len, hash);
		t = ktime_get_ns() - t;
		preempt_enable();
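		/*
		 * len * num_iters bytes were hashed in t nanoseconds, so
		 * multiplying by 1000 yields bytes per microsecond, i.e.
		 * (decimal) MB/s.
		 */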
		kunit_info(test, "len=%zu: %llu MB/s", len,
			   div64_u64((u64)len * num_iters * 1000, t ?: 1));
	}
}
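
/*
 * Usage sketch (illustrative only, not part of this template): a suite that
 * includes this file typically registers the cases like so, putting
 * benchmark_hash last as noted above. The suite name shown is hypothetical.
 *
 *	static struct kunit_case hash_test_cases[] = {
 *		HASH_KUNIT_CASES,
 *		KUNIT_CASE(benchmark_hash),
 *		{},
 *	};
 *
 *	static struct kunit_suite hash_test_suite = {
 *		.name = "sha512",
 *		.test_cases = hash_test_cases,
 *		.suite_init = hash_suite_init,
 *		.suite_exit = hash_suite_exit,
 *	};
 *	kunit_test_suite(hash_test_suite);
 */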