/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SHA-256 optimized for ARM64
 *
 * Copyright 2025 Google LLC
 */
#include <asm/neon.h>
#include <asm/simd.h>
#include <linux/cpufeature.h>
#include <linux/kmsan-checks.h> /* kmsan_unpoison_memory() */

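/*
 * CPU feature keys, set once at boot (see sha256_mod_init_arch() below):
 * have_neon selects the NEON block function, and have_ce additionally
 * selects the SHA-256 Crypto Extensions code paths.
 */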
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);

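/*
 * Assembly entry points: generic scalar, NEON, and Crypto Extensions.
 * __sha256_ce_transform() may return before all blocks are processed;
 * its return value is the number of blocks still remaining, which the
 * caller uses to loop with preemption re-enabled in between.
 */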
asmlinkage void sha256_block_data_order(struct sha256_block_state *state,
					const u8 *data, size_t nblocks);
asmlinkage void sha256_block_neon(struct sha256_block_state *state,
				  const u8 *data, size_t nblocks);
asmlinkage size_t __sha256_ce_transform(struct sha256_block_state *state,
					const u8 *data, size_t nblocks);

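/*
 * Compress nblocks blocks into the SHA-256 state, preferring the Crypto
 * Extensions, then plain NEON, then the scalar fallback.  The SIMD paths
 * are used only when may_use_simd() allows it in the current context.
 */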
static void sha256_blocks(struct sha256_block_state *state,
			  const u8 *data, size_t nblocks)
{
	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
	    static_branch_likely(&have_neon) && likely(may_use_simd())) {
		if (static_branch_likely(&have_ce)) {
			do {
				size_t rem;

				kernel_neon_begin();
				rem = __sha256_ce_transform(state,
							    data, nblocks);
				kernel_neon_end();
				/*
				 * Skip past the blocks the assembly
				 * consumed and loop over the remainder,
				 * leaving the NEON section so that
				 * preemption gets a chance in between.
				 */
				data += (nblocks - rem) * SHA256_BLOCK_SIZE;
				nblocks = rem;
			} while (nblocks);
		} else {
			kernel_neon_begin();
			sha256_block_neon(state, data, nblocks);
			kernel_neon_end();
		}
	} else {
		/* NEON unavailable or unusable here: scalar fallback */
		sha256_block_data_order(state, data, nblocks);
	}
}

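/*
 * sha256_ce_finup2x() finishes two equal-length messages in a single
 * interleaved pass.  The assembly hard-codes the struct __sha256_ctx
 * layout, so pin the field offsets it relies on at compile time.
 */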
static_assert(offsetof(struct __sha256_ctx, state) == 0);
static_assert(offsetof(struct __sha256_ctx, bytecount) == 32);
static_assert(offsetof(struct __sha256_ctx, buf) == 40);
asmlinkage void sha256_ce_finup2x(const struct __sha256_ctx *ctx,
				  const u8 *data1, const u8 *data2, int len,
				  u8 out1[SHA256_DIGEST_SIZE],
				  u8 out2[SHA256_DIGEST_SIZE]);

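/* Tell the generic SHA-256 code that this arch overrides finup_2x. */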
#define sha256_finup_2x_arch sha256_finup_2x_arch
static bool sha256_finup_2x_arch(const struct __sha256_ctx *ctx,
				 const u8 *data1, const u8 *data2, size_t len,
				 u8 out1[SHA256_DIGEST_SIZE],
				 u8 out2[SHA256_DIGEST_SIZE])
{
	/*
	 * The assembly requires len >= SHA256_BLOCK_SIZE && len <= INT_MAX.
	 * Further limit len to 65536 to avoid spending too long with
	 * preemption disabled.  (In practice len is nearly always 4096
	 * anyway.)
	 */
	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
	    static_branch_likely(&have_ce) && len >= SHA256_BLOCK_SIZE &&
	    len <= 65536 && likely(may_use_simd())) {
		kernel_neon_begin();
		sha256_ce_finup2x(ctx, data1, data2, len, out1, out2);
		kernel_neon_end();
		/*
		 * KMSAN cannot see the stores done by the assembly, so
		 * explicitly mark both output digests as initialized.
		 */
		kmsan_unpoison_memory(out1, SHA256_DIGEST_SIZE);
		kmsan_unpoison_memory(out2, SHA256_DIGEST_SIZE);
		return true;
	}
	return false;
}

78 
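/* The two-message fast path exists only when the Crypto Extensions do. */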
static bool sha256_finup_2x_is_optimized_arch(void)
{
	return static_key_enabled(&have_ce);
}

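/*
 * Boot-time CPU feature probe: Advanced SIMD enables the NEON path, and
 * the SHA2 extensions additionally enable the Crypto Extensions path.
 */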
#ifdef CONFIG_KERNEL_MODE_NEON
#define sha256_mod_init_arch sha256_mod_init_arch
static void sha256_mod_init_arch(void)
{
	if (cpu_have_named_feature(ASIMD)) {
		static_branch_enable(&have_neon);
		if (cpu_have_named_feature(SHA2))
			static_branch_enable(&have_ce);
	}
}
#endif /* CONFIG_KERNEL_MODE_NEON */