// SPDX-License-Identifier: GPL-2.0-only
/*
 * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
 *
 * Copyright (C) 2014 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>

MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha224");
MODULE_ALIAS_CRYPTO("sha256");

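/*
 * The CE-optimized code needs one piece of state beyond the generic
 * sha256_state: 'finalize' tells the assembly whether it should append
 * the final padding block itself, which it can only do when the input
 * is a non-empty whole number of blocks (see sha256_ce_finup() below).
 */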
struct sha256_ce_state {
	struct sha256_state	sst;
	u32			finalize;
};

extern const u32 sha256_ce_offsetof_count;
extern const u32 sha256_ce_offsetof_finalize;

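/*
 * The core transform is implemented in assembly (sha2-ce-core.S). It
 * returns the number of input blocks it did not consume, so the caller
 * can drop and re-take the NEON unit between chunks instead of holding
 * it for the whole input.
 */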
asmlinkage int sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
				 int blocks);

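/*
 * Kernel-mode NEON may only be used inside a kernel_neon_begin()/
 * kernel_neon_end() pair. The asm code may return before consuming all
 * blocks (e.g. when a reschedule is pending), so loop and re-acquire
 * the NEON unit until nothing remains.
 */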
static void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
				int blocks)
{
	while (blocks) {
		int rem;

		kernel_neon_begin();
		rem = sha2_ce_transform(container_of(sst, struct sha256_ce_state,
						     sst), src, blocks);
		kernel_neon_end();
		src += (blocks - rem) * SHA256_BLOCK_SIZE;
		blocks = rem;
	}
}

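/*
 * These constants are referenced by name from the assembly code (hence
 * the extern declarations above), letting it locate the count and
 * finalize fields without hardcoding the struct layout.
 */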
const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
					      sst.count);
const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
						 finalize);

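/*
 * Scalar AArch64 fallback (an OpenSSL-derived perlasm implementation)
 * that uses no NEON registers and is therefore safe to call when the
 * NEON unit is unavailable.
 */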
asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);

static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
				      int blocks)
{
	sha256_block_data_order(sst->state, src, blocks);
}

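/*
 * Updates may arrive in contexts where kernel-mode NEON cannot be used
 * (e.g. hard IRQ context); crypto_simd_usable() detects this and the
 * scalar fallback is used instead.
 */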
static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	struct sha256_ce_state *sctx = shash_desc_ctx(desc);

	if (!crypto_simd_usable())
		return sha256_base_do_update(desc, data, len,
				__sha256_block_data_order);

	sctx->finalize = 0;
	sha256_base_do_update(desc, data, len, __sha2_ce_transform);

	return 0;
}

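/*
 * finup() receives the final chunk of data together with the request to
 * emit the digest. When nothing has been hashed yet and the input is a
 * non-empty whole number of blocks, the assembly can pad and finalize
 * in a single pass.
 */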
static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
			   unsigned int len, u8 *out)
{
	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
	bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;

	if (!crypto_simd_usable()) {
		if (len)
			sha256_base_do_update(desc, data, len,
				__sha256_block_data_order);
		sha256_base_do_finalize(desc, __sha256_block_data_order);
		return sha256_base_finish(desc, out);
	}

	/*
	 * Allow the asm code to perform the finalization if there is no
	 * partial data and the input is a round multiple of the block size.
	 */
	sctx->finalize = finalize;

	sha256_base_do_update(desc, data, len, __sha2_ce_transform);
	if (!finalize)
		sha256_base_do_finalize(desc, __sha2_ce_transform);
	return sha256_base_finish(desc, out);
}

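/*
 * final() never sees fresh data, so the asm finalization shortcut does
 * not apply: clear the flag and let sha256_base_do_finalize() drive the
 * transform over the padding block.
 */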
static int sha256_ce_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_ce_state *sctx = shash_desc_ctx(desc);

	if (!crypto_simd_usable()) {
		sha256_base_do_finalize(desc, __sha256_block_data_order);
		return sha256_base_finish(desc, out);
	}

	sctx->finalize = 0;
	sha256_base_do_finalize(desc, __sha2_ce_transform);
	return sha256_base_finish(desc, out);
}

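/*
 * Only the generic sha256_state part of the context is serialized
 * (statesize below matches this): the finalize flag is transient and is
 * reset on import.
 */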
static int sha256_ce_export(struct shash_desc *desc, void *out)
{
	struct sha256_ce_state *sctx = shash_desc_ctx(desc);

	memcpy(out, &sctx->sst, sizeof(struct sha256_state));
	return 0;
}

static int sha256_ce_import(struct shash_desc *desc, const void *in)
{
	struct sha256_ce_state *sctx = shash_desc_ctx(desc);

	memcpy(&sctx->sst, in, sizeof(struct sha256_state));
	sctx->finalize = 0;
	return 0;
}

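/*
 * SHA-224 and SHA-256 differ only in their initial state and digest
 * size, so both registrations share the same handlers. The priority of
 * 200 outranks the generic C implementation (priority 100), so these
 * drivers win algorithm selection on CPUs where they are loaded.
 */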
static struct shash_alg algs[] = { {
	.init			= sha224_base_init,
	.update			= sha256_ce_update,
	.final			= sha256_ce_final,
	.finup			= sha256_ce_finup,
	.export			= sha256_ce_export,
	.import			= sha256_ce_import,
	.descsize		= sizeof(struct sha256_ce_state),
	.statesize		= sizeof(struct sha256_state),
	.digestsize		= SHA224_DIGEST_SIZE,
	.base			= {
		.cra_name		= "sha224",
		.cra_driver_name	= "sha224-ce",
		.cra_priority		= 200,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
}, {
	.init			= sha256_base_init,
	.update			= sha256_ce_update,
	.final			= sha256_ce_final,
	.finup			= sha256_ce_finup,
	.export			= sha256_ce_export,
	.import			= sha256_ce_import,
	.descsize		= sizeof(struct sha256_ce_state),
	.statesize		= sizeof(struct sha256_state),
	.digestsize		= SHA256_DIGEST_SIZE,
	.base			= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sha256-ce",
		.cra_priority		= 200,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
} };

static int __init sha2_ce_mod_init(void)
{
	return crypto_register_shashes(algs, ARRAY_SIZE(algs));
}

static void __exit sha2_ce_mod_fini(void)
{
	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}

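/*
 * Bind module loading to the CPU's SHA2 capability: the module only
 * initializes on CPUs that advertise the feature, and the generated
 * cpu-feature modalias lets udev autoload it on such systems.
 */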
module_cpu_feature_match(SHA2, sha2_ce_mod_init);
module_exit(sha2_ce_mod_fini);

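/*
 * Illustrative usage sketch (not part of this file): a kernel caller
 * reaches this implementation through the generic shash API rather than
 * by calling the functions above directly, e.g. with caller-supplied
 * 'data' and 'len':
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_shash_tfm_digest(tfm, data, len, digest);
 *		crypto_free_shash(tfm);
 *	}
 *
 * The "sha256-ce" driver is selected automatically whenever it has the
 * highest priority among the registered "sha256" implementations.
 */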