/*-
 * Copyright (c) 2005-2008 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2014-2021 The FreeBSD Foundation
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <crypto/aesni/aesni.h>
#include <crypto/aesni/sha_sse.h>
#include <crypto/sha1.h>
#include <crypto/sha2/sha224.h>
#include <crypto/sha2/sha256.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/gmac.h>
#include <cryptodev_if.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/fpu.h>

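/*
 * Per-device state: the opencrypto driver id and which instruction set
 * extensions (AES-NI, SHA) were detected at attach time.
 */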
struct aesni_softc {
	int32_t cid;
	bool	has_aes;
	bool	has_sha;
};

static int aesni_cipher_setup(struct aesni_session *ses,
    const struct crypto_session_params *csp);
static int aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp);
static int aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp);
static int aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp);

MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data");

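/*
 * An identify method creates the device, since the parent bus has no
 * way to enumerate a CPU instruction set extension on its own.
 */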
static void
aesni_identify(driver_t *drv, device_t parent)
{

	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "aesni", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "aesni", -1) == 0)
		panic("aesni: could not attach");
}

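/*
 * The AES routines also rely on SSE4.1 and the SHA routines on SSSE3,
 * so each capability is gated on two CPUID feature bits.
 */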
static void
detect_cpu_features(bool *has_aes, bool *has_sha)
{

	*has_aes = ((cpu_feature2 & CPUID2_AESNI) != 0 &&
	    (cpu_feature2 & CPUID2_SSE41) != 0);
	*has_sha = ((cpu_stdext_feature & CPUID_STDEXT_SHA) != 0 &&
	    (cpu_feature2 & CPUID2_SSSE3) != 0);
}

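/*
 * Fail the probe when the CPU supports neither extension; otherwise
 * advertise the algorithms the detected features can accelerate.
 */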
static int
aesni_probe(device_t dev)
{
	bool has_aes, has_sha;

	detect_cpu_features(&has_aes, &has_sha);
	if (!has_aes && !has_sha) {
		device_printf(dev, "No AES or SHA support.\n");
		return (EINVAL);
	} else if (has_aes && has_sha)
		device_set_desc(dev,
		    "AES-CBC,AES-CCM,AES-GCM,AES-ICM,AES-XTS,SHA1,SHA256");
	else if (has_aes)
		device_set_desc(dev,
		    "AES-CBC,AES-CCM,AES-GCM,AES-ICM,AES-XTS");
	else
		device_set_desc(dev, "SHA1,SHA256");

	return (0);
}

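/*
 * Register with the opencrypto framework as a synchronous, accelerated
 * software driver and cache the detected CPU features in the softc.
 */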
static int
aesni_attach(device_t dev)
{
	struct aesni_softc *sc;

	sc = device_get_softc(dev);

	sc->cid = crypto_get_driverid(dev, sizeof(struct aesni_session),
	    CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC |
	    CRYPTOCAP_F_ACCEL_SOFTWARE);
	if (sc->cid < 0) {
		device_printf(dev, "Could not get crypto driver id.\n");
		return (ENOMEM);
	}

	detect_cpu_features(&sc->has_aes, &sc->has_sha);
	return (0);
}

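/* Tear down all sessions and drop the crypto driver registration. */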
static int
aesni_detach(device_t dev)
{
	struct aesni_softc *sc;

	sc = device_get_softc(dev);

	crypto_unregister_all(sc->cid);

	return (0);
}

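/*
 * Plain and HMAC variants of SHA-1/224/256 are the only digests the
 * SHA code here implements.
 */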
static bool
aesni_auth_supported(struct aesni_softc *sc,
    const struct crypto_session_params *csp)
{

	if (!sc->has_sha)
		return (false);

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
		break;
	default:
		return (false);
	}

	return (true);
}

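/*
 * Validate the cipher, key length, and IV length for the modes the
 * AES-NI code handles.  XTS keys are double-length because they carry
 * both the data key and the tweak key.
 */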
static bool
aesni_cipher_supported(struct aesni_softc *sc,
    const struct crypto_session_params *csp)
{

	if (!sc->has_aes)
		return (false);

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_ICM:
		switch (csp->csp_cipher_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			CRYPTDEB("invalid CBC/ICM key length");
			return (false);
		}
		if (csp->csp_ivlen != AES_BLOCK_LEN)
			return (false);
		break;
	case CRYPTO_AES_XTS:
		switch (csp->csp_cipher_klen * 8) {
		case 256:
		case 512:
			break;
		default:
			CRYPTDEB("invalid XTS key length");
			return (false);
		}
		if (csp->csp_ivlen != AES_XTS_IV_LEN)
			return (false);
		break;
	default:
		return (false);
	}

	return (true);
}

#define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)

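/*
 * Decide whether this driver can handle a proposed session.  Returning
 * CRYPTODEV_PROBE_ACCEL_SOFTWARE ranks it ahead of the generic software
 * driver when the framework selects among candidates.
 */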
static int
aesni_probesession(device_t dev, const struct crypto_session_params *csp)
{
	struct aesni_softc *sc;

	sc = device_get_softc(dev);
	if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		if (!aesni_auth_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_CIPHER:
		if (!aesni_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			switch (csp->csp_cipher_klen * 8) {
			case 128:
			case 192:
			case 256:
				break;
			default:
				CRYPTDEB("invalid GCM key length");
				return (EINVAL);
			}
			if (csp->csp_auth_mlen != 0 &&
			    csp->csp_auth_mlen != GMAC_DIGEST_LEN)
				return (EINVAL);
			if (!sc->has_aes)
				return (EINVAL);
			break;
		case CRYPTO_AES_CCM_16:
			switch (csp->csp_cipher_klen * 8) {
			case 128:
			case 192:
			case 256:
				break;
			default:
				CRYPTDEB("invalid CCM key length");
				return (EINVAL);
			}
			if (!sc->has_aes)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		if (!aesni_auth_supported(sc, csp) ||
		    !aesni_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_ACCEL_SOFTWARE);
}

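/*
 * Initialize the per-session state embedded in the crypto session for
 * any of the supported modes.
 */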
static int
aesni_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct aesni_session *ses;
	int error;

	ses = crypto_get_driver_session(cses);

	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
	case CSP_MODE_CIPHER:
	case CSP_MODE_AEAD:
	case CSP_MODE_ETA:
		break;
	default:
		return (EINVAL);
	}
	error = aesni_cipher_setup(ses, csp);
	if (error != 0) {
		CRYPTDEB("setup failed");
		return (error);
	}

	return (0);
}

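/*
 * Requests complete synchronously: the result is recorded in crp_etype
 * and crypto_done() is called before returning.
 */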
static int
aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
{
	struct aesni_session *ses;
	int error;

	ses = crypto_get_driver_session(crp->crp_session);

	error = aesni_cipher_process(ses, crp);

	crp->crp_etype = error;
	crypto_done(crp);
	return (0);
}

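/*
 * Return a pointer to a contiguous view of the request payload.  If the
 * buffer is already contiguous it is used in place; otherwise the data
 * is copied into a temporary allocation and *allocated is set so the
 * caller knows to free it.
 */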
static uint8_t *
aesni_cipher_alloc(struct cryptop *crp, int start, int length, bool *allocated)
{
	uint8_t *addr;

	addr = crypto_contiguous_subsegment(crp, start, length);
	if (addr != NULL) {
		*allocated = false;
		return (addr);
	}
	addr = malloc(length, M_AESNI, M_NOWAIT);
	if (addr != NULL) {
		*allocated = true;
		crypto_copydata(crp, start, length, addr);
	} else
		*allocated = false;
	return (addr);
}

static device_method_t aesni_methods[] = {
	DEVMETHOD(device_identify, aesni_identify),
	DEVMETHOD(device_probe, aesni_probe),
	DEVMETHOD(device_attach, aesni_attach),
	DEVMETHOD(device_detach, aesni_detach),

	DEVMETHOD(cryptodev_probesession, aesni_probesession),
	DEVMETHOD(cryptodev_newsession, aesni_newsession),
	DEVMETHOD(cryptodev_process, aesni_process),

	DEVMETHOD_END
};

static driver_t aesni_driver = {
	"aesni",
	aesni_methods,
	sizeof(struct aesni_softc),
};

DRIVER_MODULE(aesni, nexus, aesni_driver, 0, 0);
MODULE_VERSION(aesni, 1);
MODULE_DEPEND(aesni, crypto, 1, 1, 1);

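/*
 * SHA-1 update that feeds runs of whole 64-byte blocks straight to the
 * SSE block function and buffers partial blocks in the context,
 * avoiding a redundant copy in the common aligned case.
 */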
static int
intel_sha1_update(void *vctx, const void *vdata, u_int datalen)
{
	struct sha1_ctxt *ctx = vctx;
	const char *data = vdata;
	size_t gaplen;
	size_t gapstart;
	size_t off;
	size_t copysiz;
	u_int blocks;

	off = 0;
	/* Do any aligned blocks without redundant copying. */
	if (datalen >= 64 && ctx->count % 64 == 0) {
		blocks = datalen / 64;
		ctx->c.b64[0] += blocks * 64 * 8;
		intel_sha1_step(ctx->h.b32, data + off, blocks);
		off += blocks * 64;
	}

	while (off < datalen) {
		gapstart = ctx->count % 64;
		gaplen = 64 - gapstart;

		copysiz = (gaplen < datalen - off) ? gaplen : datalen - off;
		bcopy(&data[off], &ctx->m.b8[gapstart], copysiz);
		ctx->count += copysiz;
		ctx->count %= 64;
		ctx->c.b64[0] += copysiz * 8;
		if (ctx->count % 64 == 0)
			intel_sha1_step(ctx->h.b32, (void *)ctx->m.b8, 1);
		off += copysiz;
	}

	return (0);
}

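/*
 * Adapters that give the SHA implementations the generic init/finalize
 * signatures stored in the session's hash function pointers.
 */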
static void
SHA1_Init_fn(void *ctx)
{
	sha1_init(ctx);
}

static void
SHA1_Finalize_fn(void *digest, void *ctx)
{
	sha1_result(ctx, digest);
}

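/*
 * SHA-224/256 update: finish any buffered partial block, run complete
 * blocks through the SSE step function, then stash the remainder.
 */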
static int
intel_sha256_update(void *vctx, const void *vdata, u_int len)
{
	SHA256_CTX *ctx = vctx;
	uint64_t bitlen;
	uint32_t r;
	u_int blocks;
	const unsigned char *src = vdata;

	/* Number of bytes left in the buffer from previous updates */
	r = (ctx->count >> 3) & 0x3f;

	/* Convert the length into a number of bits */
	bitlen = len << 3;

	/* Update number of bits */
	ctx->count += bitlen;

	/* Handle the case where we don't need to perform any transforms */
	if (len < 64 - r) {
		memcpy(&ctx->buf[r], src, len);
		return (0);
	}

	/* Finish the current block */
	memcpy(&ctx->buf[r], src, 64 - r);
	intel_sha256_step(ctx->state, ctx->buf, 1);
	src += 64 - r;
	len -= 64 - r;

	/* Perform complete blocks */
	if (len >= 64) {
		blocks = len / 64;
		intel_sha256_step(ctx->state, src, blocks);
		src += blocks * 64;
		len -= blocks * 64;
	}

	/* Copy left over data into buffer */
	memcpy(ctx->buf, src, len);

	return (0);
}

static void
SHA224_Init_fn(void *ctx)
{
	SHA224_Init(ctx);
}

static void
SHA224_Finalize_fn(void *digest, void *ctx)
{
	SHA224_Final(digest, ctx);
}

static void
SHA256_Init_fn(void *ctx)
{
	SHA256_Init(ctx);
}

static void
SHA256_Finalize_fn(void *digest, void *ctx)
{
	SHA256_Final(digest, ctx);
}

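/*
 * Sanity-check the auth key length: keys longer than one SHA-1 block
 * are not supported, and a key must be supplied exactly when the
 * session uses HMAC.
 */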
static int
aesni_authprepare(struct aesni_session *ses, int klen)
{

	if (klen > SHA1_BLOCK_LEN)
		return (EINVAL);
	if ((ses->hmac && klen == 0) || (!ses->hmac && klen != 0))
		return (EINVAL);
	return (0);
}

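/*
 * Per-session setup: carve the aligned key schedules out of the
 * session storage, wire up the hash callbacks for the chosen digest,
 * and expand the cipher key.  Key expansion runs AES-NI instructions,
 * so it is bracketed by an FPU kernel section.
 */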
static int
aesni_cipher_setup(struct aesni_session *ses,
    const struct crypto_session_params *csp)
{
	uint8_t *schedbase;
	int error;
	bool kt;

	schedbase = (uint8_t *)roundup2((uintptr_t)ses->schedules,
	    AES_SCHED_ALIGN);
	ses->enc_schedule = schedbase;
	ses->dec_schedule = schedbase + AES_SCHED_LEN;
	ses->xts_schedule = schedbase + AES_SCHED_LEN * 2;

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		ses->hmac = true;
		/* FALLTHROUGH */
	case CRYPTO_SHA1:
		ses->hash_len = SHA1_HASH_LEN;
		ses->hash_init = SHA1_Init_fn;
		ses->hash_update = intel_sha1_update;
		ses->hash_finalize = SHA1_Finalize_fn;
		break;
	case CRYPTO_SHA2_224_HMAC:
		ses->hmac = true;
		/* FALLTHROUGH */
	case CRYPTO_SHA2_224:
		ses->hash_len = SHA2_224_HASH_LEN;
		ses->hash_init = SHA224_Init_fn;
		ses->hash_update = intel_sha256_update;
		ses->hash_finalize = SHA224_Finalize_fn;
		break;
	case CRYPTO_SHA2_256_HMAC:
		ses->hmac = true;
		/* FALLTHROUGH */
	case CRYPTO_SHA2_256:
		ses->hash_len = SHA2_256_HASH_LEN;
		ses->hash_init = SHA256_Init_fn;
		ses->hash_update = intel_sha256_update;
		ses->hash_finalize = SHA256_Finalize_fn;
		break;
	}

	if (ses->hash_len != 0) {
		if (csp->csp_auth_mlen == 0)
			ses->mlen = ses->hash_len;
		else
			ses->mlen = csp->csp_auth_mlen;

		error = aesni_authprepare(ses, csp->csp_auth_klen);
		if (error != 0)
			return (error);
	} else if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16) {
		if (csp->csp_auth_mlen == 0)
			ses->mlen = AES_CBC_MAC_HASH_LEN;
		else
			ses->mlen = csp->csp_auth_mlen;
	}

	kt = (csp->csp_cipher_alg == 0);
	if (!kt) {
		fpu_kern_enter(curthread, NULL,
		    FPU_KERN_NORMAL | FPU_KERN_NOCTX);
	}

	error = 0;
	if (csp->csp_cipher_key != NULL)
		aesni_cipher_setup_common(ses, csp, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	if (!kt) {
		fpu_kern_leave(curthread, NULL);
	}
	return (error);
}

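/*
 * Validate per-request constraints, then dispatch.  ETA sessions follow
 * encrypt-then-authenticate: encrypt before MAC on the way out, verify
 * the MAC before decrypting on the way in.
 */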
static int
aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp)
{
	const struct crypto_session_params *csp;
	int error;

	csp = crypto_get_params(crp->crp_session);
	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CCM_16:
		if (crp->crp_payload_length > ccm_max_payload_length(csp))
			return (EMSGSIZE);
		/* FALLTHROUGH */
	case CRYPTO_AES_ICM:
	case CRYPTO_AES_NIST_GCM_16:
		if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
			return (EINVAL);
		break;
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_XTS:
		/* CBC & XTS can only handle full blocks for now */
		if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0)
			return (EINVAL);
		break;
	}

	/* Do work */
	if (csp->csp_mode == CSP_MODE_ETA) {
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			error = aesni_cipher_crypt(ses, crp, csp);
			if (error == 0)
				error = aesni_cipher_mac(ses, crp, csp);
		} else {
			error = aesni_cipher_mac(ses, crp, csp);
			if (error == 0)
				error = aesni_cipher_crypt(ses, crp, csp);
		}
	} else if (csp->csp_mode == CSP_MODE_DIGEST)
		error = aesni_cipher_mac(ses, crp, csp);
	else
		error = aesni_cipher_crypt(ses, crp, csp);

	return (error);
}

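/*
 * Run the cipher itself.  Payload, AAD, and output are each made
 * contiguous (copying through temporary buffers when needed), the key
 * schedule is re-expanded if the request carries its own key, and the
 * AES-NI routines run inside an FPU kernel section.  For the AEAD
 * modes the tag is written back on encryption and verified on
 * decryption.
 */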
static int
aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp)
{
	uint8_t iv[AES_BLOCK_LEN], tag[GMAC_DIGEST_LEN];
	uint8_t *authbuf, *buf, *outbuf;
	int error;
	bool encflag, allocated, authallocated, outallocated, outcopy;

	if (crp->crp_payload_length == 0) {
		buf = NULL;
		allocated = false;
	} else {
		buf = aesni_cipher_alloc(crp, crp->crp_payload_start,
		    crp->crp_payload_length, &allocated);
		if (buf == NULL)
			return (ENOMEM);
	}

	outallocated = false;
	authallocated = false;
	authbuf = NULL;
	if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16 ||
	    csp->csp_cipher_alg == CRYPTO_AES_CCM_16) {
		if (crp->crp_aad_length == 0) {
			authbuf = NULL;
		} else if (crp->crp_aad != NULL) {
			authbuf = crp->crp_aad;
		} else {
			authbuf = aesni_cipher_alloc(crp, crp->crp_aad_start,
			    crp->crp_aad_length, &authallocated);
			if (authbuf == NULL) {
				error = ENOMEM;
				goto out;
			}
		}
	}

	if (CRYPTO_HAS_OUTPUT_BUFFER(crp) && crp->crp_payload_length > 0) {
		outbuf = crypto_buffer_contiguous_subsegment(&crp->crp_obuf,
		    crp->crp_payload_output_start, crp->crp_payload_length);
		if (outbuf == NULL) {
			outcopy = true;
			if (allocated)
				outbuf = buf;
			else {
				outbuf = malloc(crp->crp_payload_length,
				    M_AESNI, M_NOWAIT);
				if (outbuf == NULL) {
					error = ENOMEM;
					goto out;
				}
				outallocated = true;
			}
		} else
			outcopy = false;
	} else {
		outbuf = buf;
		outcopy = allocated;
	}

	fpu_kern_enter(curthread, NULL, FPU_KERN_NORMAL | FPU_KERN_NOCTX);

	error = 0;
	encflag = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);
	if (crp->crp_cipher_key != NULL)
		aesni_cipher_setup_common(ses, csp, crp->crp_cipher_key,
		    csp->csp_cipher_klen);

	crypto_read_iv(crp, iv);

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		if (encflag)
			aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
			    crp->crp_payload_length, buf, outbuf, iv);
		else {
			if (buf != outbuf)
				memcpy(outbuf, buf, crp->crp_payload_length);
			aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
			    crp->crp_payload_length, outbuf, iv);
		}
		break;
	case CRYPTO_AES_ICM:
		/* encryption & decryption are the same */
		aesni_encrypt_icm(ses->rounds, ses->enc_schedule,
		    crp->crp_payload_length, buf, outbuf, iv);
		break;
	case CRYPTO_AES_XTS:
		if (encflag)
			aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
			    ses->xts_schedule, crp->crp_payload_length, buf,
			    outbuf, iv);
		else
			aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
			    ses->xts_schedule, crp->crp_payload_length, buf,
			    outbuf, iv);
		break;
	case CRYPTO_AES_NIST_GCM_16:
		if (encflag) {
			memset(tag, 0, sizeof(tag));
			AES_GCM_encrypt(buf, outbuf, authbuf, iv, tag,
			    crp->crp_payload_length, crp->crp_aad_length,
			    csp->csp_ivlen, ses->enc_schedule, ses->rounds);
			crypto_copyback(crp, crp->crp_digest_start, sizeof(tag),
			    tag);
		} else {
			crypto_copydata(crp, crp->crp_digest_start, sizeof(tag),
			    tag);
			if (!AES_GCM_decrypt(buf, outbuf, authbuf, iv, tag,
			    crp->crp_payload_length, crp->crp_aad_length,
			    csp->csp_ivlen, ses->enc_schedule, ses->rounds))
				error = EBADMSG;
		}
		break;
	case CRYPTO_AES_CCM_16:
		if (encflag) {
			memset(tag, 0, sizeof(tag));
			AES_CCM_encrypt(buf, outbuf, authbuf, iv, tag,
			    crp->crp_payload_length, crp->crp_aad_length,
			    csp->csp_ivlen, ses->mlen, ses->enc_schedule,
			    ses->rounds);
			crypto_copyback(crp, crp->crp_digest_start, ses->mlen,
			    tag);
		} else {
			crypto_copydata(crp, crp->crp_digest_start, ses->mlen,
			    tag);
			if (!AES_CCM_decrypt(buf, outbuf, authbuf, iv, tag,
			    crp->crp_payload_length, crp->crp_aad_length,
			    csp->csp_ivlen, ses->mlen, ses->enc_schedule,
			    ses->rounds))
				error = EBADMSG;
		}
		break;
	}

	fpu_kern_leave(curthread, NULL);

	if (outcopy && error == 0)
		crypto_copyback(crp, CRYPTO_HAS_OUTPUT_BUFFER(crp) ?
		    crp->crp_payload_output_start : crp->crp_payload_start,
		    crp->crp_payload_length, outbuf);

out:
	if (allocated)
		zfree(buf, M_AESNI);
	if (authallocated)
		zfree(authbuf, M_AESNI);
	if (outallocated)
		zfree(outbuf, M_AESNI);
	explicit_bzero(iv, sizeof(iv));
	explicit_bzero(tag, sizeof(tag));
	return (error);
}

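/*
 * Compute the digest over AAD, payload, and (for ESN sessions) the
 * high-order replay counter.  HMAC is computed directly from its
 * definition, H((K ^ opad) || H((K ^ ipad) || data)), with the key
 * zero-padded to one block.  On verify requests the result is compared
 * against the digest in the request with timingsafe_bcmp().
 */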
static int
aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp)
{
	union {
		struct SHA256Context sha2 __aligned(16);
		struct sha1_ctxt sha1 __aligned(16);
	} sctx;
	uint32_t res[SHA2_256_HASH_LEN / sizeof(uint32_t)];
	const uint8_t *key;
	int i, keylen;

	if (crp->crp_auth_key != NULL)
		key = crp->crp_auth_key;
	else
		key = csp->csp_auth_key;
	keylen = csp->csp_auth_klen;

	fpu_kern_enter(curthread, NULL, FPU_KERN_NORMAL | FPU_KERN_NOCTX);

	if (ses->hmac) {
		uint8_t hmac_key[SHA1_BLOCK_LEN] __aligned(16);

		/* Inner hash: (K ^ IPAD) || data */
		ses->hash_init(&sctx);
		for (i = 0; i < keylen; i++)
			hmac_key[i] = key[i] ^ HMAC_IPAD_VAL;
		for (i = keylen; i < sizeof(hmac_key); i++)
			hmac_key[i] = 0 ^ HMAC_IPAD_VAL;
		ses->hash_update(&sctx, hmac_key, sizeof(hmac_key));

		if (crp->crp_aad != NULL)
			ses->hash_update(&sctx, crp->crp_aad,
			    crp->crp_aad_length);
		else
			crypto_apply(crp, crp->crp_aad_start,
			    crp->crp_aad_length, ses->hash_update, &sctx);
		if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
		    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			crypto_apply_buf(&crp->crp_obuf,
			    crp->crp_payload_output_start,
			    crp->crp_payload_length,
			    ses->hash_update, &sctx);
		else
			crypto_apply(crp, crp->crp_payload_start,
			    crp->crp_payload_length, ses->hash_update, &sctx);

		if (csp->csp_flags & CSP_F_ESN)
			ses->hash_update(&sctx, crp->crp_esn, 4);

		ses->hash_finalize(res, &sctx);

		/* Outer hash: (K ^ OPAD) || inner hash */
		ses->hash_init(&sctx);
		for (i = 0; i < keylen; i++)
			hmac_key[i] = key[i] ^ HMAC_OPAD_VAL;
		for (i = keylen; i < sizeof(hmac_key); i++)
			hmac_key[i] = 0 ^ HMAC_OPAD_VAL;
		ses->hash_update(&sctx, hmac_key, sizeof(hmac_key));
		ses->hash_update(&sctx, res, ses->hash_len);
		ses->hash_finalize(res, &sctx);
		explicit_bzero(hmac_key, sizeof(hmac_key));
	} else {
		ses->hash_init(&sctx);

		if (crp->crp_aad != NULL)
			ses->hash_update(&sctx, crp->crp_aad,
			    crp->crp_aad_length);
		else
			crypto_apply(crp, crp->crp_aad_start,
			    crp->crp_aad_length, ses->hash_update, &sctx);
		if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
		    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			crypto_apply_buf(&crp->crp_obuf,
			    crp->crp_payload_output_start,
			    crp->crp_payload_length,
			    ses->hash_update, &sctx);
		else
			crypto_apply(crp, crp->crp_payload_start,
			    crp->crp_payload_length,
			    ses->hash_update, &sctx);

		ses->hash_finalize(res, &sctx);
	}

	fpu_kern_leave(curthread, NULL);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		uint32_t res2[SHA2_256_HASH_LEN / sizeof(uint32_t)];

		crypto_copydata(crp, crp->crp_digest_start, ses->mlen, res2);
		if (timingsafe_bcmp(res, res2, ses->mlen) != 0)
			return (EBADMSG);
		explicit_bzero(res2, sizeof(res2));
	} else
		crypto_copyback(crp, crp->crp_digest_start, ses->mlen, res);
	explicit_bzero(res, sizeof(res));
	return (0);
}