xref: /freebsd/sys/contrib/openzfs/module/icp/algs/aes/aes_impl.c (revision 5ca8e32633c4ffbbcd6762e5888b6a4ba0708c6c)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/crypto/icp.h>
#include <sys/crypto/spi.h>
#include <sys/simd.h>
#include <modes/modes.h>
#include <aes/aes_impl.h>

/*
 * Initialize AES encryption and decryption key schedules.
 *
 * Parameters:
 * cipherKey	User key
 * keyBits	AES key size (128, 192, or 256 bits)
 * keysched	AES key schedule to be initialized, of type aes_key_t.
 *		Allocated by aes_alloc_keysched().
 */
void
aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched)
{
	const aes_impl_ops_t *ops = aes_impl_get_ops();
	aes_key_t *newbie = keysched;
	uint_t keysize, i, j;
	union {
		uint64_t	ka64[4];
		uint32_t	ka32[8];
	} keyarr;

	switch (keyBits) {
	case 128:
		newbie->nr = 10;
		break;

	case 192:
		newbie->nr = 12;
		break;

	case 256:
		newbie->nr = 14;
		break;

	default:
		/* should never get here */
		return;
	}
	keysize = CRYPTO_BITS2BYTES(keyBits);

	/*
	 * The generic C implementation requires a byteswap on little-endian
	 * machines; the various accelerated implementations may not.
	 */
	if (!ops->needs_byteswap) {
		/* no byteswap needed */
		if (IS_P2ALIGNED(cipherKey, sizeof (uint64_t))) {
			for (i = 0, j = 0; j < keysize; i++, j += 8) {
				/* LINTED: pointer alignment */
				keyarr.ka64[i] = *((uint64_t *)&cipherKey[j]);
			}
		} else {
			memcpy(keyarr.ka32, cipherKey, keysize);
		}
	} else {
		/* byte swap */
		for (i = 0, j = 0; j < keysize; i++, j += 4) {
			keyarr.ka32[i] =
			    htonl(*(uint32_t *)(void *)&cipherKey[j]);
		}
	}

	ops->generate(newbie, keyarr.ka32, keyBits);
	newbie->ops = ops;

	/*
	 * Note: if there are systems that need the AES_64BIT_KS type in the
	 * future, move setting key schedule type to individual implementations
	 */
	newbie->type = AES_32BIT_KS;
}
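
/*
 * Illustrative usage sketch (hypothetical caller; "raw_key" is assumed to
 * be a caller-supplied buffer holding a 256-bit key): the schedule is
 * allocated with aes_alloc_keysched() below and then initialized here.
 *
 *	size_t size;
 *	void *ks = aes_alloc_keysched(&size, KM_SLEEP);
 *	if (ks != NULL)
 *		aes_init_keysched(raw_key, 256, ks);
 */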


/*
 * Encrypt one block using AES.
 * Align and byte-swap if needed.
 *
 * Parameters:
 * ks	Key schedule, of type aes_key_t
 * pt	Input block (plain text)
 * ct	Output block (crypto text).  Can overlap with pt
 */
int
aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct)
{
	aes_key_t	*ksch = (aes_key_t *)ks;
	const aes_impl_ops_t	*ops = ksch->ops;

	if (IS_P2ALIGNED2(pt, ct, sizeof (uint32_t)) && !ops->needs_byteswap) {
		/* LINTED:  pointer alignment */
		ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr,
		    /* LINTED:  pointer alignment */
		    (uint32_t *)pt, (uint32_t *)ct);
	} else {
		uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];

		/* Copy input block into buffer */
		if (ops->needs_byteswap) {
			buffer[0] = htonl(*(uint32_t *)(void *)&pt[0]);
			buffer[1] = htonl(*(uint32_t *)(void *)&pt[4]);
			buffer[2] = htonl(*(uint32_t *)(void *)&pt[8]);
			buffer[3] = htonl(*(uint32_t *)(void *)&pt[12]);
		} else
			memcpy(&buffer, pt, AES_BLOCK_LEN);

		ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr, buffer, buffer);

		/* Copy result from buffer to output block */
		if (ops->needs_byteswap) {
			*(uint32_t *)(void *)&ct[0] = htonl(buffer[0]);
			*(uint32_t *)(void *)&ct[4] = htonl(buffer[1]);
			*(uint32_t *)(void *)&ct[8] = htonl(buffer[2]);
			*(uint32_t *)(void *)&ct[12] = htonl(buffer[3]);
		} else
			memcpy(ct, &buffer, AES_BLOCK_LEN);
	}
	return (CRYPTO_SUCCESS);
}
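
/*
 * Illustrative sketch (hypothetical caller; "plaintext" and the error
 * handling are placeholders): encrypting a single 16-byte block in place,
 * using a key schedule "ks" prepared as shown above.
 *
 *	uint8_t block[AES_BLOCK_LEN];
 *
 *	(void) memcpy(block, plaintext, AES_BLOCK_LEN);
 *	if (aes_encrypt_block(ks, block, block) != CRYPTO_SUCCESS)
 *		... handle failure ...
 */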


/*
 * Decrypt one block using AES.
 * Align and byte-swap if needed.
 *
 * Parameters:
 * ks	Key schedule, of type aes_key_t
 * ct	Input block (crypto text)
 * pt	Output block (plain text). Can overlap with ct
 */
int
aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt)
{
	aes_key_t	*ksch = (aes_key_t *)ks;
	const aes_impl_ops_t	*ops = ksch->ops;

	if (IS_P2ALIGNED2(ct, pt, sizeof (uint32_t)) && !ops->needs_byteswap) {
		/* LINTED:  pointer alignment */
		ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr,
		    /* LINTED:  pointer alignment */
		    (uint32_t *)ct, (uint32_t *)pt);
	} else {
		uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];

		/* Copy input block into buffer */
		if (ops->needs_byteswap) {
			buffer[0] = htonl(*(uint32_t *)(void *)&ct[0]);
			buffer[1] = htonl(*(uint32_t *)(void *)&ct[4]);
			buffer[2] = htonl(*(uint32_t *)(void *)&ct[8]);
			buffer[3] = htonl(*(uint32_t *)(void *)&ct[12]);
		} else
			memcpy(&buffer, ct, AES_BLOCK_LEN);

		ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr, buffer, buffer);

		/* Copy result from buffer to output block */
		if (ops->needs_byteswap) {
			*(uint32_t *)(void *)&pt[0] = htonl(buffer[0]);
			*(uint32_t *)(void *)&pt[4] = htonl(buffer[1]);
			*(uint32_t *)(void *)&pt[8] = htonl(buffer[2]);
			*(uint32_t *)(void *)&pt[12] = htonl(buffer[3]);
		} else
			memcpy(pt, &buffer, AES_BLOCK_LEN);
	}
	return (CRYPTO_SUCCESS);
}
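
/*
 * Continuing the sketch above (illustrative only): decrypting the same
 * block in place should recover the original plaintext.
 *
 *	if (aes_decrypt_block(ks, block, block) != CRYPTO_SUCCESS)
 *		... handle failure ...
 *	ASSERT0(memcmp(block, plaintext, AES_BLOCK_LEN));
 */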


/*
 * Allocate key schedule for AES.
 *
 * Return the pointer and set size to the number of bytes allocated;
 * return NULL (without setting size) if the allocation fails.
 * Memory allocated must be freed by the caller when done.
 *
 * Parameters:
 * size		Size of key schedule allocated, in bytes
 * kmflag	Flag passed to kmem_alloc(9F); ignored in userland.
 */
void *
aes_alloc_keysched(size_t *size, int kmflag)
{
	aes_key_t *keysched;

	keysched = kmem_alloc(sizeof (aes_key_t), kmflag);
	if (keysched != NULL) {
		*size = sizeof (aes_key_t);
		return (keysched);
	}
	return (NULL);
}
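
/*
 * Illustrative sketch (hypothetical caller): the schedule holds key
 * material, so a caller typically zeroes it before freeing it with the
 * size reported by aes_alloc_keysched().
 *
 *	memset(ks, 0, size);
 *	kmem_free(ks, size);
 */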

/* AES implementation that contains the fastest methods */
static aes_impl_ops_t aes_fastest_impl = {
	.name = "fastest"
};

/* All compiled in implementations */
static const aes_impl_ops_t *aes_all_impl[] = {
	&aes_generic_impl,
#if defined(__x86_64)
	&aes_x86_64_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AES)
	&aes_aesni_impl,
#endif
};

/* Indicates that aes_impl_init() has been completed */
static boolean_t aes_impl_initialized = B_FALSE;

/* Select aes implementation */
#define	IMPL_FASTEST	(UINT32_MAX)
#define	IMPL_CYCLE	(UINT32_MAX-1)

#define	AES_IMPL_READ(i) (*(volatile uint32_t *) &(i))

static uint32_t icp_aes_impl = IMPL_FASTEST;
static uint32_t user_sel_impl = IMPL_FASTEST;

/* Hold all supported implementations */
static size_t aes_supp_impl_cnt = 0;
static aes_impl_ops_t *aes_supp_impl[ARRAY_SIZE(aes_all_impl)];

/*
 * Returns the AES operations for encrypt/decrypt/key setup.  When a
 * SIMD implementation is not allowed in the current context, fall back
 * to the generic C implementation.
 */
const aes_impl_ops_t *
aes_impl_get_ops(void)
{
	if (!kfpu_allowed())
		return (&aes_generic_impl);

	const aes_impl_ops_t *ops = NULL;
	const uint32_t impl = AES_IMPL_READ(icp_aes_impl);

	switch (impl) {
	case IMPL_FASTEST:
		ASSERT(aes_impl_initialized);
		ops = &aes_fastest_impl;
		break;
	case IMPL_CYCLE:
		/* Cycle through supported implementations */
		ASSERT(aes_impl_initialized);
		ASSERT3U(aes_supp_impl_cnt, >, 0);
		static size_t cycle_impl_idx = 0;
		size_t idx = (++cycle_impl_idx) % aes_supp_impl_cnt;
		ops = aes_supp_impl[idx];
		break;
	default:
		ASSERT3U(impl, <, aes_supp_impl_cnt);
		ASSERT3U(aes_supp_impl_cnt, >, 0);
		if (impl < ARRAY_SIZE(aes_all_impl))
			ops = aes_supp_impl[impl];
		break;
	}

	ASSERT3P(ops, !=, NULL);

	return (ops);
}

/*
 * Initialize all supported implementations.
 */
void
aes_impl_init(void)
{
	aes_impl_ops_t *curr_impl;
	int i, c;

	/* Move supported implementations into aes_supp_impl */
	for (i = 0, c = 0; i < ARRAY_SIZE(aes_all_impl); i++) {
		curr_impl = (aes_impl_ops_t *)aes_all_impl[i];

		if (curr_impl->is_supported())
			aes_supp_impl[c++] = (aes_impl_ops_t *)curr_impl;
	}
	aes_supp_impl_cnt = c;

	/*
	 * Set the fastest implementation given the assumption that the
	 * hardware accelerated version is the fastest.
	 */
#if defined(__x86_64)
#if defined(HAVE_AES)
	if (aes_aesni_impl.is_supported()) {
		memcpy(&aes_fastest_impl, &aes_aesni_impl,
		    sizeof (aes_fastest_impl));
	} else
#endif
	{
		memcpy(&aes_fastest_impl, &aes_x86_64_impl,
		    sizeof (aes_fastest_impl));
	}
#else
	memcpy(&aes_fastest_impl, &aes_generic_impl,
	    sizeof (aes_fastest_impl));
#endif

	strlcpy(aes_fastest_impl.name, "fastest", AES_IMPL_NAME_MAX);

	/* Finish initialization */
	atomic_swap_32(&icp_aes_impl, user_sel_impl);
	aes_impl_initialized = B_TRUE;
}

static const struct {
	const char *name;
	uint32_t sel;
} aes_impl_opts[] = {
		{ "cycle",	IMPL_CYCLE },
		{ "fastest",	IMPL_FASTEST },
};

/*
 * Set the desired aes implementation.
 *
 * If we are called before init(), the user preference is saved in
 * user_sel_impl and applied in a later init() call. This occurs when the
 * module parameter is specified on module load. Otherwise, update
 * icp_aes_impl directly.
 *
 * @val		Name of aes implementation to use
 */
int
aes_impl_set(const char *val)
{
	int err = -EINVAL;
	char req_name[AES_IMPL_NAME_MAX];
	uint32_t impl = AES_IMPL_READ(user_sel_impl);
	size_t i;

	/* sanitize input */
	i = strnlen(val, AES_IMPL_NAME_MAX);
	if (i == 0 || i >= AES_IMPL_NAME_MAX)
		return (err);

	strlcpy(req_name, val, AES_IMPL_NAME_MAX);
	while (i > 0 && isspace(req_name[i-1]))
		i--;
	req_name[i] = '\0';

	/* Check mandatory options */
	for (i = 0; i < ARRAY_SIZE(aes_impl_opts); i++) {
		if (strcmp(req_name, aes_impl_opts[i].name) == 0) {
			impl = aes_impl_opts[i].sel;
			err = 0;
			break;
		}
	}

	/* check all supported impl if init() was already called */
	if (err != 0 && aes_impl_initialized) {
		/* check all supported implementations */
		for (i = 0; i < aes_supp_impl_cnt; i++) {
			if (strcmp(req_name, aes_supp_impl[i]->name) == 0) {
				impl = i;
				err = 0;
				break;
			}
		}
	}

	if (err == 0) {
		if (aes_impl_initialized)
			atomic_swap_32(&icp_aes_impl, impl);
		else
			atomic_swap_32(&user_sel_impl, impl);
	}

	return (err);
}
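
/*
 * Illustrative sketch: selecting an implementation by name.  "cycle" and
 * "fastest" are always accepted; once aes_impl_init() has run, the names
 * of the compiled-in implementations (e.g. "generic", and on x86_64
 * "x86_64" or "aesni" when supported) are accepted as well.
 *
 *	if (aes_impl_set("fastest") != 0)
 *		... the name was not recognized ...
 */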

#if defined(_KERNEL) && defined(__linux__)

static int
icp_aes_impl_set(const char *val, zfs_kernel_param_t *kp)
{
	return (aes_impl_set(val));
}

static int
icp_aes_impl_get(char *buffer, zfs_kernel_param_t *kp)
{
	int i, cnt = 0;
	char *fmt;
	const uint32_t impl = AES_IMPL_READ(icp_aes_impl);

	ASSERT(aes_impl_initialized);

	/* list mandatory options */
	for (i = 0; i < ARRAY_SIZE(aes_impl_opts); i++) {
		fmt = (impl == aes_impl_opts[i].sel) ? "[%s] " : "%s ";
		cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt,
		    aes_impl_opts[i].name);
	}

	/* list all supported implementations */
	for (i = 0; i < aes_supp_impl_cnt; i++) {
		fmt = (i == impl) ? "[%s] " : "%s ";
		cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt,
		    aes_supp_impl[i]->name);
	}

	return (cnt);
}

module_param_call(icp_aes_impl, icp_aes_impl_set, icp_aes_impl_get,
    NULL, 0644);
MODULE_PARM_DESC(icp_aes_impl, "Select aes implementation.");
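
/*
 * On Linux this selection is exposed as a module parameter; assuming the
 * icp module, the current and available choices can be inspected and
 * changed via sysfs (illustrative only):
 *
 *	# cat /sys/module/icp/parameters/icp_aes_impl
 *	cycle [fastest] generic x86_64 aesni
 *	# echo generic > /sys/module/icp/parameters/icp_aes_impl
 */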
#endif
445