// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/crypto/icp.h>
#include <sys/crypto/spi.h>
#include <sys/simd.h>
#include <modes/modes.h>
#include <aes/aes_impl.h>
/*
 * Initialize AES encryption and decryption key schedules.
 *
 * Parameters:
 * cipherKey	User key
 * keyBits	AES key size (128, 192, or 256 bits)
 * keysched	AES key schedule to be initialized, of type aes_key_t.
 *		Allocated by aes_alloc_keysched().
 */
void
aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched)
{
	const aes_impl_ops_t *ops = aes_impl_get_ops();
	aes_key_t *newbie = keysched;
	uint_t keysize, i, j;
	union {
		uint64_t ka64[4];
		uint32_t ka32[8];
	} keyarr;

	switch (keyBits) {
	case 128:
		newbie->nr = 10;
		break;

	case 192:
		newbie->nr = 12;
		break;

	case 256:
		newbie->nr = 14;
		break;

	default:
		/* should never get here */
		return;
	}
	keysize = CRYPTO_BITS2BYTES(keyBits);

	/*
	 * The generic C implementation requires a byteswap on little-endian
	 * machines; the accelerated implementations for the various
	 * architectures may not.
	 */
	if (!ops->needs_byteswap) {
		/* no byteswap needed */
		if (IS_P2ALIGNED(cipherKey, sizeof (uint64_t))) {
			for (i = 0, j = 0; j < keysize; i++, j += 8) {
				/* LINTED: pointer alignment */
				keyarr.ka64[i] = *((uint64_t *)&cipherKey[j]);
			}
		} else {
			memcpy(keyarr.ka32, cipherKey, keysize);
		}
	} else {
		/* byte swap */
		for (i = 0, j = 0; j < keysize; i++, j += 4) {
			keyarr.ka32[i] =
			    htonl(*(uint32_t *)(void *)&cipherKey[j]);
		}
	}

	ops->generate(newbie, keyarr.ka32, keyBits);
	newbie->ops = ops;

	/*
	 * Note: if there are systems that need the AES_64BIT_KS type in the
	 * future, move setting the key schedule type into the individual
	 * implementations.
	 */
	newbie->type = AES_32BIT_KS;
}
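
/*
 * Usage sketch (hypothetical caller, for illustration only; ks would come
 * from aes_alloc_keysched() below): a 256-bit key expands into a 14-round
 * schedule, 192-bit into 12, and 128-bit into 10. Note that an unsupported
 * keyBits value returns silently, leaving ks uninitialized.
 *
 *	uint8_t raw_key[32];		// caller-supplied key material
 *	aes_init_keysched(raw_key, 256, ks);
 */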


/*
 * Encrypt one block using AES.
 * Align if needed and (for x86 32-bit only) byte-swap.
 *
 * Parameters:
 * ks	Key schedule, of type aes_key_t
 * pt	Input block (plain text)
 * ct	Output block (crypto text). Can overlap with pt
 */
int
aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct)
{
	aes_key_t *ksch = (aes_key_t *)ks;
	const aes_impl_ops_t *ops = ksch->ops;

	if (IS_P2ALIGNED2(pt, ct, sizeof (uint32_t)) && !ops->needs_byteswap) {
		/* LINTED: pointer alignment */
		ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr,
		    /* LINTED: pointer alignment */
		    (uint32_t *)pt, (uint32_t *)ct);
	} else {
		uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];

		/* Copy input block into buffer */
		if (ops->needs_byteswap) {
			buffer[0] = htonl(*(uint32_t *)(void *)&pt[0]);
			buffer[1] = htonl(*(uint32_t *)(void *)&pt[4]);
			buffer[2] = htonl(*(uint32_t *)(void *)&pt[8]);
			buffer[3] = htonl(*(uint32_t *)(void *)&pt[12]);
		} else
			memcpy(&buffer, pt, AES_BLOCK_LEN);

		ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr, buffer, buffer);

		/* Copy result from buffer to output block */
		if (ops->needs_byteswap) {
			*(uint32_t *)(void *)&ct[0] = htonl(buffer[0]);
			*(uint32_t *)(void *)&ct[4] = htonl(buffer[1]);
			*(uint32_t *)(void *)&ct[8] = htonl(buffer[2]);
			*(uint32_t *)(void *)&ct[12] = htonl(buffer[3]);
		} else
			memcpy(ct, &buffer, AES_BLOCK_LEN);
	}
	return (CRYPTO_SUCCESS);
}


/*
 * Decrypt one block using AES.
 * Align and byte-swap if needed.
 *
 * Parameters:
 * ks	Key schedule, of type aes_key_t
 * ct	Input block (crypto text)
 * pt	Output block (plain text). Can overlap with ct
 */
int
aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt)
{
	aes_key_t *ksch = (aes_key_t *)ks;
	const aes_impl_ops_t *ops = ksch->ops;

	if (IS_P2ALIGNED2(ct, pt, sizeof (uint32_t)) && !ops->needs_byteswap) {
		/* LINTED: pointer alignment */
		ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr,
		    /* LINTED: pointer alignment */
		    (uint32_t *)ct, (uint32_t *)pt);
	} else {
		uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];

		/* Copy input block into buffer */
		if (ops->needs_byteswap) {
			buffer[0] = htonl(*(uint32_t *)(void *)&ct[0]);
			buffer[1] = htonl(*(uint32_t *)(void *)&ct[4]);
			buffer[2] = htonl(*(uint32_t *)(void *)&ct[8]);
			buffer[3] = htonl(*(uint32_t *)(void *)&ct[12]);
		} else
			memcpy(&buffer, ct, AES_BLOCK_LEN);

		ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr, buffer, buffer);

		/* Copy result from buffer to output block */
		if (ops->needs_byteswap) {
			*(uint32_t *)(void *)&pt[0] = htonl(buffer[0]);
			*(uint32_t *)(void *)&pt[4] = htonl(buffer[1]);
			*(uint32_t *)(void *)&pt[8] = htonl(buffer[2]);
			*(uint32_t *)(void *)&pt[12] = htonl(buffer[3]);
		} else
			memcpy(pt, &buffer, AES_BLOCK_LEN);
	}
	return (CRYPTO_SUCCESS);
}
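
/*
 * Roundtrip sketch (illustrative only): both functions operate on exactly
 * one 16-byte (AES_BLOCK_LEN) block and return CRYPTO_SUCCESS, so a
 * hypothetical self-test could do:
 *
 *	uint8_t pt[AES_BLOCK_LEN], ct[AES_BLOCK_LEN], out[AES_BLOCK_LEN];
 *	(void) aes_encrypt_block(ks, pt, ct);
 *	(void) aes_decrypt_block(ks, ct, out);
 *	ASSERT0(memcmp(pt, out, AES_BLOCK_LEN));
 */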


/*
 * Allocate key schedule for AES.
 *
 * Return the pointer and set size to the number of bytes allocated.
 * Memory allocated must be freed by the caller when done.
 *
 * Parameters:
 * size		Size of key schedule allocated, in bytes
 * kmflag	Flag passed to kmem_alloc(9F); ignored in userland.
 */
void *
aes_alloc_keysched(size_t *size, int kmflag)
{
	aes_key_t *keysched;

	keysched = kmem_alloc(sizeof (aes_key_t), kmflag);
	if (keysched != NULL) {
		*size = sizeof (aes_key_t);
		return (keysched);
	}
	return (NULL);
}
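
/*
 * Lifecycle sketch (hypothetical caller, illustration only): the returned
 * size must be remembered and passed back to kmem_free() when the schedule
 * is no longer needed.
 *
 *	size_t size;
 *	void *ks = aes_alloc_keysched(&size, KM_SLEEP);
 *	if (ks != NULL) {
 *		aes_init_keysched(raw_key, 256, ks);
 *		...
 *		memset(ks, 0, size);	// don't leak key material
 *		kmem_free(ks, size);
 *	}
 */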

/* AES implementation that contains the fastest methods */
static aes_impl_ops_t aes_fastest_impl = {
	.name = "fastest"
};

/* All compiled-in implementations */
static const aes_impl_ops_t *aes_all_impl[] = {
	&aes_generic_impl,
#if defined(__x86_64)
	&aes_x86_64_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AES)
	&aes_aesni_impl,
#endif
};

/* Indicate that implementation initialization has completed */
static boolean_t aes_impl_initialized = B_FALSE;

/* Select aes implementation */
#define	IMPL_FASTEST	(UINT32_MAX)
#define	IMPL_CYCLE	(UINT32_MAX-1)

#define	AES_IMPL_READ(i) (*(volatile uint32_t *) &(i))

static uint32_t icp_aes_impl = IMPL_FASTEST;
static uint32_t user_sel_impl = IMPL_FASTEST;

/* Hold all supported implementations */
static size_t aes_supp_impl_cnt = 0;
static aes_impl_ops_t *aes_supp_impl[ARRAY_SIZE(aes_all_impl)];

/*
 * Returns the AES operations for encrypt/decrypt/key setup. When a
 * SIMD implementation is not allowed in the current context, fall
 * back to the generic C implementation.
 */
const aes_impl_ops_t *
aes_impl_get_ops(void)
{
	if (!kfpu_allowed())
		return (&aes_generic_impl);

	const aes_impl_ops_t *ops = NULL;
	const uint32_t impl = AES_IMPL_READ(icp_aes_impl);

	switch (impl) {
	case IMPL_FASTEST:
		ASSERT(aes_impl_initialized);
		ops = &aes_fastest_impl;
		break;
	case IMPL_CYCLE:
		/* Cycle through supported implementations */
		ASSERT(aes_impl_initialized);
		ASSERT3U(aes_supp_impl_cnt, >, 0);
		static size_t cycle_impl_idx = 0;
		size_t idx = (++cycle_impl_idx) % aes_supp_impl_cnt;
		ops = aes_supp_impl[idx];
		break;
	default:
		ASSERT3U(impl, <, aes_supp_impl_cnt);
		ASSERT3U(aes_supp_impl_cnt, >, 0);
		if (impl < ARRAY_SIZE(aes_all_impl))
			ops = aes_supp_impl[impl];
		break;
	}

	ASSERT3P(ops, !=, NULL);

	return (ops);
}
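
/*
 * Note on the selector encoding (illustrative): icp_aes_impl holds either
 * one of the two special values above or an index into aes_supp_impl[].
 * For example, on a build where aes_supp_impl[] ends up as { generic,
 * x86_64, aesni }, an icp_aes_impl of 2 pins aesni, IMPL_FASTEST uses the
 * pre-copied aes_fastest_impl, and IMPL_CYCLE rotates through all three
 * on every call, which is mainly useful for testing.
 */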

/*
 * Initialize all supported implementations.
 */
void
aes_impl_init(void)
{
	aes_impl_ops_t *curr_impl;
	int i, c;

	/* Move supported implementations into aes_supp_impl */
	for (i = 0, c = 0; i < ARRAY_SIZE(aes_all_impl); i++) {
		curr_impl = (aes_impl_ops_t *)aes_all_impl[i];

		if (curr_impl->is_supported())
			aes_supp_impl[c++] = curr_impl;
	}
	aes_supp_impl_cnt = c;

	/*
	 * Set the fastest implementation given the assumption that the
	 * hardware accelerated version is the fastest.
	 */
#if defined(__x86_64)
#if defined(HAVE_AES)
	if (aes_aesni_impl.is_supported()) {
		memcpy(&aes_fastest_impl, &aes_aesni_impl,
		    sizeof (aes_fastest_impl));
	} else
#endif
	{
		memcpy(&aes_fastest_impl, &aes_x86_64_impl,
		    sizeof (aes_fastest_impl));
	}
#else
	memcpy(&aes_fastest_impl, &aes_generic_impl,
	    sizeof (aes_fastest_impl));
#endif

	strlcpy(aes_fastest_impl.name, "fastest", AES_IMPL_NAME_MAX);

	/* Finish initialization */
	atomic_swap_32(&icp_aes_impl, user_sel_impl);
	aes_impl_initialized = B_TRUE;
}

static const struct {
	const char *name;
	uint32_t sel;
} aes_impl_opts[] = {
	{ "cycle",	IMPL_CYCLE },
	{ "fastest",	IMPL_FASTEST },
};

/*
 * Set the desired aes implementation.
 *
 * If we are called before init(), the user preference will be saved in
 * user_sel_impl and applied in a later init() call. This occurs when the
 * module parameter is specified on module load. Otherwise, icp_aes_impl
 * is updated directly.
 *
 * @val		Name of the aes implementation to use
 */
int
aes_impl_set(const char *val)
{
	int err = -EINVAL;
	char req_name[AES_IMPL_NAME_MAX];
	uint32_t impl = AES_IMPL_READ(user_sel_impl);
	size_t i;

	/* sanitize input */
	i = strnlen(val, AES_IMPL_NAME_MAX);
	if (i == 0 || i >= AES_IMPL_NAME_MAX)
		return (err);

	strlcpy(req_name, val, AES_IMPL_NAME_MAX);
	while (i > 0 && isspace(req_name[i-1]))
		i--;
	req_name[i] = '\0';

	/* Check mandatory options */
	for (i = 0; i < ARRAY_SIZE(aes_impl_opts); i++) {
		if (strcmp(req_name, aes_impl_opts[i].name) == 0) {
			impl = aes_impl_opts[i].sel;
			err = 0;
			break;
		}
	}

	/* Check all supported implementations if init() was already called */
	if (err != 0 && aes_impl_initialized) {
		for (i = 0; i < aes_supp_impl_cnt; i++) {
			if (strcmp(req_name, aes_supp_impl[i]->name) == 0) {
				impl = i;
				err = 0;
				break;
			}
		}
	}

	if (err == 0) {
		if (aes_impl_initialized)
			atomic_swap_32(&icp_aes_impl, impl);
		else
			atomic_swap_32(&user_sel_impl, impl);
	}

	return (err);
}
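
/*
 * Usage sketch (illustrative only): callers pass one of the mandatory
 * option names or, after aes_impl_init(), a specific implementation name;
 * anything else yields -EINVAL.
 *
 *	if (aes_impl_set("fastest") == 0)
 *		...	// hardware-preferred implementation selected
 *	if (aes_impl_set("aesni") != 0)
 *		...	// e.g. not compiled in, or CPU lacks support
 */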

#if defined(_KERNEL) && defined(__linux__)

static int
icp_aes_impl_set(const char *val, zfs_kernel_param_t *kp)
{
	return (aes_impl_set(val));
}

static int
icp_aes_impl_get(char *buffer, zfs_kernel_param_t *kp)
{
	int i, cnt = 0;
	char *fmt;
	const uint32_t impl = AES_IMPL_READ(icp_aes_impl);

	ASSERT(aes_impl_initialized);

	/* list mandatory options */
	for (i = 0; i < ARRAY_SIZE(aes_impl_opts); i++) {
		fmt = (impl == aes_impl_opts[i].sel) ? "[%s] " : "%s ";
		cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt,
		    aes_impl_opts[i].name);
	}

	/* list all supported implementations */
	for (i = 0; i < aes_supp_impl_cnt; i++) {
		fmt = (i == impl) ? "[%s] " : "%s ";
		cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt,
		    aes_supp_impl[i]->name);
	}

	return (cnt);
}

module_param_call(icp_aes_impl, icp_aes_impl_set, icp_aes_impl_get,
    NULL, 0644);
MODULE_PARM_DESC(icp_aes_impl, "Select aes implementation.");
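
/*
 * Usage sketch (illustrative only): on a typical Linux build, where the
 * ICP is linked into the zfs module, this parameter can be inspected and
 * changed at runtime through sysfs, e.g.:
 *
 *	# cat /sys/module/zfs/parameters/icp_aes_impl
 *	cycle [fastest] generic x86_64 aesni
 *	# echo generic > /sys/module/zfs/parameters/icp_aes_impl
 */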
#endif