/* xref: /linux/lib/crypto/arm64/sha256.h (revision 13150742b09e720fdf021de14cd2b98b37415a89) */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SHA-256 optimized for ARM64
 *
 * Copyright 2025 Google LLC
 */
#include <asm/neon.h>
#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>
10 
asmlinkage void sha256_block_data_order(struct sha256_block_state *state,
					const u8 *data, size_t nblocks);
asmlinkage void sha256_block_neon(struct sha256_block_state *state,
				  const u8 *data, size_t nblocks);
asmlinkage size_t __sha256_ce_transform(struct sha256_block_state *state,
					const u8 *data, size_t nblocks);

static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
20 
/*
 * Hash @nblocks SHA-256 blocks from @data into @state.
 *
 * Implementation selection, fastest first:
 *  - SHA-2 Crypto Extensions (have_ce), when NEON is usable in this context;
 *  - plain NEON (have_neon);
 *  - generic scalar assembly otherwise (also the !KERNEL_MODE_NEON fallback).
 *
 * The CE path loops: __sha256_ce_transform() returns the number of blocks
 * it did NOT process (allowing it to yield the NEON unit periodically —
 * kernel_neon_end() re-enables preemption between chunks), so we advance
 * @data past the consumed blocks and retry until none remain.
 */
static void sha256_blocks(struct sha256_block_state *state,
			  const u8 *data, size_t nblocks)
{
	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
	    static_branch_likely(&have_neon) && crypto_simd_usable()) {
		if (static_branch_likely(&have_ce)) {
			do {
				size_t rem;

				kernel_neon_begin();
				rem = __sha256_ce_transform(state,
							    data, nblocks);
				kernel_neon_end();
				/* Skip the blocks the CE transform consumed. */
				data += (nblocks - rem) * SHA256_BLOCK_SIZE;
				nblocks = rem;
			} while (nblocks);
		} else {
			kernel_neon_begin();
			sha256_block_neon(state, data, nblocks);
			kernel_neon_end();
		}
	} else {
		sha256_block_data_order(state, data, nblocks);
	}
}
#ifdef CONFIG_KERNEL_MODE_NEON
#define sha256_mod_init_arch sha256_mod_init_arch
/*
 * One-time CPU feature probe: enable the have_neon static key when the
 * ASIMD feature is present, and additionally have_ce when the SHA2
 * (Crypto Extensions) feature is present.  The nesting ensures have_ce
 * is only enabled when have_neon is too, which sha256_blocks() relies on.
 */
static inline void sha256_mod_init_arch(void)
{
	if (cpu_have_named_feature(ASIMD)) {
		static_branch_enable(&have_neon);
		if (cpu_have_named_feature(SHA2))
			static_branch_enable(&have_ce);
	}
}
#endif /* CONFIG_KERNEL_MODE_NEON */
58