/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>
#include <sys/mutex.h>

#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

struct swcr_auth {
    void *sw_ictx;
    void *sw_octx;
    struct auth_hash *sw_axf;
    uint16_t sw_mlen;
};

struct swcr_encdec {
    void *sw_kschedule;
    struct enc_xform *sw_exf;
};

struct swcr_compdec {
    struct comp_algo *sw_cxf;
};

struct swcr_session {
    struct mtx swcr_lock;
    int (*swcr_process)(struct swcr_session *, struct cryptop *);

    struct swcr_auth swcr_auth;
    struct swcr_encdec swcr_encdec;
    struct swcr_compdec swcr_compdec;
};

static int32_t swcr_id;

static void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(struct swcr_session *ses, struct cryptop *crp)
{

    return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
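/*
 * A note on the two cipher styles handled by swcr_encdec() below
 * (a summary of the code, not a normative description): transforms
 * that provide a 'reinit' hook (e.g. counter modes) manage the IV
 * themselves and are fed one block at a time, while transforms
 * without one are driven here in CBC fashion:
 *
 *	C[i] = E_K(P[i] ^ C[i-1]), with C[0] = E_K(P[0] ^ IV)
 *	P[i] = D_K(C[i]) ^ C[i-1]
 */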
static int
swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
{
    unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
    unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
    const struct crypto_session_params *csp;
    struct swcr_encdec *sw;
    struct enc_xform *exf;
    int i, blks, inlen, ivlen, outlen, resid;
    struct crypto_buffer_cursor cc_in, cc_out;
    const unsigned char *inblk;
    unsigned char *outblk;
    int error;
    bool encrypting;

    error = 0;

    sw = &ses->swcr_encdec;
    exf = sw->sw_exf;
    ivlen = exf->ivsize;

    if (exf->native_blocksize == 0) {
        /* Check for non-padded data */
        if ((crp->crp_payload_length % exf->blocksize) != 0)
            return (EINVAL);

        blks = exf->blocksize;
    } else
        blks = exf->native_blocksize;

    if (exf == &enc_xform_aes_icm &&
        (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
        return (EINVAL);

    if (crp->crp_cipher_key != NULL) {
        csp = crypto_get_params(crp->crp_session);
        error = exf->setkey(sw->sw_kschedule,
            crp->crp_cipher_key, csp->csp_cipher_klen);
        if (error)
            return (error);
    }

    crypto_read_iv(crp, iv);

    if (exf->reinit) {
        /*
         * xforms that provide a reinit method perform all IV
         * handling themselves.
         */
        exf->reinit(sw->sw_kschedule, iv);
    }

    ivp = iv;

    crypto_cursor_init(&cc_in, &crp->crp_buf);
    crypto_cursor_advance(&cc_in, crp->crp_payload_start);
    inlen = crypto_cursor_seglen(&cc_in);
    inblk = crypto_cursor_segbase(&cc_in);
    if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
        crypto_cursor_init(&cc_out, &crp->crp_obuf);
        crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
    } else
        cc_out = cc_in;
    outlen = crypto_cursor_seglen(&cc_out);
    outblk = crypto_cursor_segbase(&cc_out);

    resid = crp->crp_payload_length;
    encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

    /*
     * Loop through encrypting blocks.  'inlen' is the remaining
     * length of the current segment in the input buffer.
     * 'outlen' is the remaining length of the current segment in
     * the output buffer.
     */
    while (resid >= blks) {
        /*
         * If the current block is not contained within the
         * current input/output segment, use 'blk' as a local
         * buffer.
         */
        if (inlen < blks) {
            crypto_cursor_copydata(&cc_in, blks, blk);
            inblk = blk;
        }
        if (outlen < blks)
            outblk = blk;

        /*
         * Ciphers without a 'reinit' hook are assumed to be
         * used in CBC mode where the chaining is done here.
         */
        if (exf->reinit != NULL) {
            if (encrypting)
                exf->encrypt(sw->sw_kschedule, inblk, outblk);
            else
                exf->decrypt(sw->sw_kschedule, inblk, outblk);
        } else if (encrypting) {
            /* XOR with previous block */
            for (i = 0; i < blks; i++)
                outblk[i] = inblk[i] ^ ivp[i];

            exf->encrypt(sw->sw_kschedule, outblk, outblk);

            /*
             * Keep encrypted block for XOR'ing
             * with next block
             */
            memcpy(iv, outblk, blks);
            ivp = iv;
        } else {    /* decrypt */
            /*
             * Keep encrypted block for XOR'ing
             * with next block
             */
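            /*
             * Note: the ciphertext has to be saved before the
             * decrypt call below because 'inblk' and 'outblk' may
             * alias when operating in place; 'iv' and 'iv2' are
             * alternated so the previous ciphertext block survives
             * one more iteration.
             */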
            nivp = (ivp == iv) ? iv2 : iv;
            memcpy(nivp, inblk, blks);

            exf->decrypt(sw->sw_kschedule, inblk, outblk);

            /* XOR with previous block */
            for (i = 0; i < blks; i++)
                outblk[i] ^= ivp[i];

            ivp = nivp;
        }

        if (inlen < blks) {
            inlen = crypto_cursor_seglen(&cc_in);
            inblk = crypto_cursor_segbase(&cc_in);
        } else {
            crypto_cursor_advance(&cc_in, blks);
            inlen -= blks;
            inblk += blks;
        }

        if (outlen < blks) {
            crypto_cursor_copyback(&cc_out, blks, blk);
            outlen = crypto_cursor_seglen(&cc_out);
            outblk = crypto_cursor_segbase(&cc_out);
        } else {
            crypto_cursor_advance(&cc_out, blks);
            outlen -= blks;
            outblk += blks;
        }

        resid -= blks;
    }

    /* Handle trailing partial block for stream ciphers. */
    if (resid > 0) {
        KASSERT(exf->native_blocksize != 0,
            ("%s: partial block of %d bytes for cipher %s",
            __func__, resid, exf->name));
        KASSERT(exf->reinit != NULL,
            ("%s: partial block cipher %s without reinit hook",
            __func__, exf->name));
        KASSERT(resid < blks, ("%s: partial block too big", __func__));

        inlen = crypto_cursor_seglen(&cc_in);
        outlen = crypto_cursor_seglen(&cc_out);
        if (inlen < resid) {
            crypto_cursor_copydata(&cc_in, resid, blk);
            inblk = blk;
        } else
            inblk = crypto_cursor_segbase(&cc_in);
        if (outlen < resid)
            outblk = blk;
        else
            outblk = crypto_cursor_segbase(&cc_out);
        if (encrypting)
            exf->encrypt_last(sw->sw_kschedule, inblk, outblk,
                resid);
        else
            exf->decrypt_last(sw->sw_kschedule, inblk, outblk,
                resid);
        if (outlen < resid)
            crypto_cursor_copyback(&cc_out, resid, blk);
    }

    explicit_bzero(blk, sizeof(blk));
    explicit_bzero(iv, sizeof(iv));
    explicit_bzero(iv2, sizeof(iv2));
    return (0);
}

static void
swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
    const uint8_t *key, int klen)
{

    switch (axf->type) {
    case CRYPTO_SHA1_HMAC:
    case CRYPTO_SHA2_224_HMAC:
    case CRYPTO_SHA2_256_HMAC:
    case CRYPTO_SHA2_384_HMAC:
    case CRYPTO_SHA2_512_HMAC:
    case CRYPTO_NULL_HMAC:
    case CRYPTO_RIPEMD160_HMAC:
        hmac_init_ipad(axf, key, klen, sw->sw_ictx);
        hmac_init_opad(axf, key, klen, sw->sw_octx);
        break;
    case CRYPTO_POLY1305:
    case CRYPTO_BLAKE2B:
    case CRYPTO_BLAKE2S:
        axf->Setkey(sw->sw_ictx, key, klen);
        axf->Init(sw->sw_ictx);
        break;
    default:
        panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
    }
}

/*
 * Compute or verify hash.
 */
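/*
 * A sketch of the HMAC path below: sw_ictx holds a hash context
 * pre-seeded with (key ^ ipad) and sw_octx one with (key ^ opad),
 * so the digest H((K ^ opad) || H((K ^ ipad) || m)) is produced by
 * cloning the precomputed contexts rather than rehashing the key.
 */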
static int
swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
    u_char aalg[HASH_MAX_LEN];
    const struct crypto_session_params *csp;
    struct swcr_auth *sw;
    struct auth_hash *axf;
    union authctx ctx;
    int err;

    sw = &ses->swcr_auth;

    axf = sw->sw_axf;

    if (crp->crp_auth_key != NULL) {
        csp = crypto_get_params(crp->crp_session);
        swcr_authprepare(axf, sw, crp->crp_auth_key,
            csp->csp_auth_klen);
    }

    bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

    if (crp->crp_aad != NULL)
        err = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
    else
        err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
            axf->Update, &ctx);
    if (err)
        return (err);

    if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
        CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
        err = crypto_apply_buf(&crp->crp_obuf,
            crp->crp_payload_output_start, crp->crp_payload_length,
            axf->Update, &ctx);
    else
        err = crypto_apply(crp, crp->crp_payload_start,
            crp->crp_payload_length, axf->Update, &ctx);
    if (err)
        return (err);

    switch (axf->type) {
    case CRYPTO_SHA1:
    case CRYPTO_SHA2_224:
    case CRYPTO_SHA2_256:
    case CRYPTO_SHA2_384:
    case CRYPTO_SHA2_512:
        axf->Final(aalg, &ctx);
        break;

    case CRYPTO_SHA1_HMAC:
    case CRYPTO_SHA2_224_HMAC:
    case CRYPTO_SHA2_256_HMAC:
    case CRYPTO_SHA2_384_HMAC:
    case CRYPTO_SHA2_512_HMAC:
    case CRYPTO_RIPEMD160_HMAC:
        if (sw->sw_octx == NULL)
            return (EINVAL);

        axf->Final(aalg, &ctx);
        bcopy(sw->sw_octx, &ctx, axf->ctxsize);
        axf->Update(&ctx, aalg, axf->hashsize);
        axf->Final(aalg, &ctx);
        break;

    case CRYPTO_BLAKE2B:
    case CRYPTO_BLAKE2S:
    case CRYPTO_NULL_HMAC:
    case CRYPTO_POLY1305:
        axf->Final(aalg, &ctx);
        break;
    }

    if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
        u_char uaalg[HASH_MAX_LEN];

        crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
        if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
            err = EBADMSG;
        explicit_bzero(uaalg, sizeof(uaalg));
    } else {
        /* Inject the authentication data */
        crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
    }
    explicit_bzero(aalg, sizeof(aalg));
    return (err);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

static int
swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
{
    uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
    u_char *blk = (u_char *)blkbuf;
    u_char tag[GMAC_DIGEST_LEN];
    u_char iv[AES_BLOCK_LEN];
    struct crypto_buffer_cursor cc;
    const u_char *inblk;
    union authctx ctx;
    struct swcr_auth *swa;
    struct auth_hash *axf;
    uint32_t *blkp;
    int blksz, error, ivlen, len, resid;

    swa = &ses->swcr_auth;
    axf = swa->sw_axf;

    bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
    blksz = GMAC_BLOCK_LEN;
    KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
        __func__));

    /* Initialize the IV */
    ivlen = AES_GCM_IV_LEN;
    crypto_read_iv(crp, iv);

    axf->Reinit(&ctx, iv, ivlen);
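    /*
     * swcr_gmac() computes GMAC, i.e. GCM with an empty ciphertext:
     * the request payload is hashed as if it were AAD, and the
     * length block appended below carries only that bit length,
     * leaving the ciphertext length words zero.
     */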
    crypto_cursor_init(&cc, &crp->crp_buf);
    crypto_cursor_advance(&cc, crp->crp_payload_start);
    for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) {
        len = crypto_cursor_seglen(&cc);
        if (len >= blksz) {
            inblk = crypto_cursor_segbase(&cc);
            len = rounddown(MIN(len, resid), blksz);
            crypto_cursor_advance(&cc, len);
        } else {
            len = blksz;
            crypto_cursor_copydata(&cc, len, blk);
            inblk = blk;
        }
        axf->Update(&ctx, inblk, len);
    }
    if (resid > 0) {
        memset(blk, 0, blksz);
        crypto_cursor_copydata(&cc, resid, blk);
        axf->Update(&ctx, blk, blksz);
    }

    /* length block */
    memset(blk, 0, blksz);
    blkp = (uint32_t *)blk + 1;
    *blkp = htobe32(crp->crp_payload_length * 8);
    axf->Update(&ctx, blk, blksz);

    /* Finalize MAC */
    axf->Final(tag, &ctx);

    error = 0;
    if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
        u_char tag2[GMAC_DIGEST_LEN];

        crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
            tag2);
        if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
            error = EBADMSG;
        explicit_bzero(tag2, sizeof(tag2));
    } else {
        /* Inject the authentication data */
        crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
    }
    explicit_bzero(blkbuf, sizeof(blkbuf));
    explicit_bzero(tag, sizeof(tag));
    explicit_bzero(iv, sizeof(iv));
    return (error);
}

static int
swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
{
    uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
    u_char *blk = (u_char *)blkbuf;
    u_char tag[GMAC_DIGEST_LEN];
    u_char iv[AES_BLOCK_LEN];
    struct crypto_buffer_cursor cc_in, cc_out;
    const u_char *inblk;
    u_char *outblk;
    union authctx ctx;
    struct swcr_auth *swa;
    struct swcr_encdec *swe;
    struct auth_hash *axf;
    struct enc_xform *exf;
    uint32_t *blkp;
    int blksz, error, ivlen, len, r, resid;

    swa = &ses->swcr_auth;
    axf = swa->sw_axf;

    bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
    blksz = GMAC_BLOCK_LEN;
    KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
        __func__));

    swe = &ses->swcr_encdec;
    exf = swe->sw_exf;
    KASSERT(axf->blocksize == exf->native_blocksize,
        ("%s: blocksize mismatch", __func__));

    if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
        return (EINVAL);

    /* Initialize the IV */
    ivlen = AES_GCM_IV_LEN;
    bcopy(crp->crp_iv, iv, ivlen);

    /* Supply MAC with IV */
    axf->Reinit(&ctx, iv, ivlen);

    /* Supply MAC with AAD */
    if (crp->crp_aad != NULL) {
        len = rounddown(crp->crp_aad_length, blksz);
        if (len != 0)
            axf->Update(&ctx, crp->crp_aad, len);
        if (crp->crp_aad_length != len) {
            memset(blk, 0, blksz);
            memcpy(blk, (char *)crp->crp_aad + len,
                crp->crp_aad_length - len);
            axf->Update(&ctx, blk, blksz);
        }
    } else {
        crypto_cursor_init(&cc_in, &crp->crp_buf);
        crypto_cursor_advance(&cc_in, crp->crp_aad_start);
        for (resid = crp->crp_aad_length; resid >= blksz;
            resid -= len) {
            len = crypto_cursor_seglen(&cc_in);
            if (len >= blksz) {
                inblk = crypto_cursor_segbase(&cc_in);
                len = rounddown(MIN(len, resid), blksz);
                crypto_cursor_advance(&cc_in, len);
            } else {
                len = blksz;
                crypto_cursor_copydata(&cc_in, len, blk);
                inblk = blk;
            }
            axf->Update(&ctx, inblk, len);
        }
        if (resid > 0) {
            memset(blk, 0, blksz);
            crypto_cursor_copydata(&cc_in, resid, blk);
            axf->Update(&ctx, blk, blksz);
        }
    }

    exf->reinit(swe->sw_kschedule, iv);
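    /*
     * Encryption and authentication can share one pass here: GCM
     * MACs the ciphertext, so each block is hashed as soon as it is
     * produced.  On decryption only the MAC is computed now; the
     * payload is not written back until the tag verifies below.
     */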
    /* Do encryption with MAC */
    crypto_cursor_init(&cc_in, &crp->crp_buf);
    crypto_cursor_advance(&cc_in, crp->crp_payload_start);
    if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
        crypto_cursor_init(&cc_out, &crp->crp_obuf);
        crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
    } else
        cc_out = cc_in;
    for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
        if (crypto_cursor_seglen(&cc_in) < blksz) {
            crypto_cursor_copydata(&cc_in, blksz, blk);
            inblk = blk;
        } else {
            inblk = crypto_cursor_segbase(&cc_in);
            crypto_cursor_advance(&cc_in, blksz);
        }
        if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
            if (crypto_cursor_seglen(&cc_out) < blksz)
                outblk = blk;
            else
                outblk = crypto_cursor_segbase(&cc_out);
            exf->encrypt(swe->sw_kschedule, inblk, outblk);
            axf->Update(&ctx, outblk, blksz);
            if (outblk == blk)
                crypto_cursor_copyback(&cc_out, blksz, blk);
            else
                crypto_cursor_advance(&cc_out, blksz);
        } else {
            axf->Update(&ctx, inblk, blksz);
        }
    }
    if (resid > 0) {
        crypto_cursor_copydata(&cc_in, resid, blk);
        if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
            exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
            crypto_cursor_copyback(&cc_out, resid, blk);
        }
        axf->Update(&ctx, blk, resid);
    }

    /* length block */
    memset(blk, 0, blksz);
    blkp = (uint32_t *)blk + 1;
    *blkp = htobe32(crp->crp_aad_length * 8);
    blkp = (uint32_t *)blk + 3;
    *blkp = htobe32(crp->crp_payload_length * 8);
    axf->Update(&ctx, blk, blksz);

    /* Finalize MAC */
    axf->Final(tag, &ctx);

    /* Validate tag */
    error = 0;
    if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
        u_char tag2[GMAC_DIGEST_LEN];

        crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2);

        r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
        explicit_bzero(tag2, sizeof(tag2));
        if (r != 0) {
            error = EBADMSG;
            goto out;
        }

        /* tag matches, decrypt data */
        crypto_cursor_init(&cc_in, &crp->crp_buf);
        crypto_cursor_advance(&cc_in, crp->crp_payload_start);
        for (resid = crp->crp_payload_length; resid > blksz;
            resid -= blksz) {
            if (crypto_cursor_seglen(&cc_in) < blksz) {
                crypto_cursor_copydata(&cc_in, blksz, blk);
                inblk = blk;
            } else {
                inblk = crypto_cursor_segbase(&cc_in);
                crypto_cursor_advance(&cc_in, blksz);
            }
            if (crypto_cursor_seglen(&cc_out) < blksz)
                outblk = blk;
            else
                outblk = crypto_cursor_segbase(&cc_out);
            exf->decrypt(swe->sw_kschedule, inblk, outblk);
            if (outblk == blk)
                crypto_cursor_copyback(&cc_out, blksz, blk);
            else
                crypto_cursor_advance(&cc_out, blksz);
        }
        if (resid > 0) {
            crypto_cursor_copydata(&cc_in, resid, blk);
            exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
            crypto_cursor_copyback(&cc_out, resid, blk);
        }
    } else {
        /* Inject the authentication data */
        crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
    }

out:
    explicit_bzero(blkbuf, sizeof(blkbuf));
    explicit_bzero(tag, sizeof(tag));
    explicit_bzero(iv, sizeof(iv));

    return (error);
}

static int
swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
{
    u_char tag[AES_CBC_MAC_HASH_LEN];
    u_char iv[AES_BLOCK_LEN];
    union authctx ctx;
    struct swcr_auth *swa;
    struct auth_hash *axf;
    int error, ivlen;

    swa = &ses->swcr_auth;
    axf = swa->sw_axf;

    bcopy(swa->sw_ictx, &ctx, axf->ctxsize);

    /* Initialize the IV */
    ivlen = AES_CCM_IV_LEN;
    crypto_read_iv(crp, iv);

    /*
     * AES CCM-CBC-MAC needs to know the length of both the auth
     * data and payload data before doing the auth computation.
     */
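    /*
     * The lengths have to be set before Reinit: CCM's initial
     * CBC-MAC block (B_0) encodes the message length, so the
     * underlying transform folds these values in while absorbing
     * the nonce.
     */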
    ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
    ctx.aes_cbc_mac_ctx.cryptDataLength = 0;

    axf->Reinit(&ctx, iv, ivlen);
    if (crp->crp_aad != NULL)
        error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
    else
        error = crypto_apply(crp, crp->crp_payload_start,
            crp->crp_payload_length, axf->Update, &ctx);
    if (error)
        return (error);

    /* Finalize MAC */
    axf->Final(tag, &ctx);

    if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
        u_char tag2[AES_CBC_MAC_HASH_LEN];

        crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
            tag2);
        if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
            error = EBADMSG;
        explicit_bzero(tag2, sizeof(tag2));
    } else {
        /* Inject the authentication data */
        crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
    }
    explicit_bzero(tag, sizeof(tag));
    explicit_bzero(iv, sizeof(iv));
    return (error);
}

static int
swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
{
    uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
    u_char *blk = (u_char *)blkbuf;
    u_char tag[AES_CBC_MAC_HASH_LEN];
    u_char iv[AES_BLOCK_LEN];
    struct crypto_buffer_cursor cc_in, cc_out;
    const u_char *inblk;
    u_char *outblk;
    union authctx ctx;
    struct swcr_auth *swa;
    struct swcr_encdec *swe;
    struct auth_hash *axf;
    struct enc_xform *exf;
    int blksz, error, ivlen, r, resid;

    swa = &ses->swcr_auth;
    axf = swa->sw_axf;

    bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
    blksz = AES_BLOCK_LEN;
    KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
        __func__));

    swe = &ses->swcr_encdec;
    exf = swe->sw_exf;
    KASSERT(axf->blocksize == exf->native_blocksize,
        ("%s: blocksize mismatch", __func__));

    if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
        return (EINVAL);

    /* Initialize the IV */
    ivlen = AES_CCM_IV_LEN;
    bcopy(crp->crp_iv, iv, ivlen);

    /*
     * AES CCM-CBC-MAC needs to know the length of both the auth
     * data and payload data before doing the auth computation.
     */
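    /*
     * Unlike the digest-only case in swcr_ccm_cbc_mac() above, a
     * full CCM request carries real payload, so cryptDataLength is
     * the payload length here rather than zero.
     */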
    ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
    ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;

    /* Supply MAC with IV */
    axf->Reinit(&ctx, iv, ivlen);

    /* Supply MAC with AAD */
    if (crp->crp_aad != NULL)
        error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
    else
        error = crypto_apply(crp, crp->crp_aad_start,
            crp->crp_aad_length, axf->Update, &ctx);
    if (error)
        return (error);

    exf->reinit(swe->sw_kschedule, iv);

    /* Do encryption/decryption with MAC */
    crypto_cursor_init(&cc_in, &crp->crp_buf);
    crypto_cursor_advance(&cc_in, crp->crp_payload_start);
    if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
        crypto_cursor_init(&cc_out, &crp->crp_obuf);
        crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
    } else
        cc_out = cc_in;
    for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
        if (crypto_cursor_seglen(&cc_in) < blksz) {
            crypto_cursor_copydata(&cc_in, blksz, blk);
            inblk = blk;
        } else {
            inblk = crypto_cursor_segbase(&cc_in);
            crypto_cursor_advance(&cc_in, blksz);
        }
        if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
            if (crypto_cursor_seglen(&cc_out) < blksz)
                outblk = blk;
            else
                outblk = crypto_cursor_segbase(&cc_out);
            axf->Update(&ctx, inblk, blksz);
            exf->encrypt(swe->sw_kschedule, inblk, outblk);
            if (outblk == blk)
                crypto_cursor_copyback(&cc_out, blksz, blk);
            else
                crypto_cursor_advance(&cc_out, blksz);
        } else {
            /*
             * One of the problems with CCM+CBC is that
             * the authentication is done on the
             * unencrypted data.  As a result, we have to
             * decrypt the data twice: once to generate
             * the tag and a second time after the tag is
             * verified.
             */
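            /*
             * This first-pass decrypt writes only into the local
             * 'blk' buffer, so no plaintext reaches the output
             * buffer before the tag has been checked.
             */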
            exf->decrypt(swe->sw_kschedule, inblk, blk);
            axf->Update(&ctx, blk, blksz);
        }
    }
    if (resid > 0) {
        crypto_cursor_copydata(&cc_in, resid, blk);
        if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
            axf->Update(&ctx, blk, resid);
            exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
            crypto_cursor_copyback(&cc_out, resid, blk);
        } else {
            exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
            axf->Update(&ctx, blk, resid);
        }
    }

    /* Finalize MAC */
    axf->Final(tag, &ctx);

    /* Validate tag */
    error = 0;
    if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
        u_char tag2[AES_CBC_MAC_HASH_LEN];

        crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
            tag2);

        r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
        explicit_bzero(tag2, sizeof(tag2));
        if (r != 0) {
            error = EBADMSG;
            goto out;
        }

        /* tag matches, decrypt data */
        exf->reinit(swe->sw_kschedule, iv);
        crypto_cursor_init(&cc_in, &crp->crp_buf);
        crypto_cursor_advance(&cc_in, crp->crp_payload_start);
        for (resid = crp->crp_payload_length; resid > blksz;
            resid -= blksz) {
            if (crypto_cursor_seglen(&cc_in) < blksz) {
                crypto_cursor_copydata(&cc_in, blksz, blk);
                inblk = blk;
            } else {
                inblk = crypto_cursor_segbase(&cc_in);
                crypto_cursor_advance(&cc_in, blksz);
            }
            if (crypto_cursor_seglen(&cc_out) < blksz)
                outblk = blk;
            else
                outblk = crypto_cursor_segbase(&cc_out);
            exf->decrypt(swe->sw_kschedule, inblk, outblk);
            if (outblk == blk)
                crypto_cursor_copyback(&cc_out, blksz, blk);
            else
                crypto_cursor_advance(&cc_out, blksz);
        }
        if (resid > 0) {
            crypto_cursor_copydata(&cc_in, resid, blk);
            exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
            crypto_cursor_copyback(&cc_out, resid, blk);
        }
    } else {
        /* Inject the authentication data */
        crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
    }

out:
    explicit_bzero(blkbuf, sizeof(blkbuf));
    explicit_bzero(tag, sizeof(tag));
    explicit_bzero(iv, sizeof(iv));
    return (error);
}

/*
 * Apply a cipher and a digest to perform EtA.
 */
static int
swcr_eta(struct swcr_session *ses, struct cryptop *crp)
{
    int error;

    if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
        error = swcr_encdec(ses, crp);
        if (error == 0)
            error = swcr_authcompute(ses, crp);
    } else {
        error = swcr_authcompute(ses, crp);
        if (error == 0)
            error = swcr_encdec(ses, crp);
    }
    return (error);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
    u_int8_t *data, *out;
    struct comp_algo *cxf;
    int adj;
    u_int32_t result;

    cxf = ses->swcr_compdec.sw_cxf;

    /*
     * We must handle the whole buffer of data at once; if the
     * payload is not contiguous in the mbuf chain, copy it into a
     * separate buffer first.
     */
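    /*
     * Note on the result handling below: if compression would not
     * shrink the payload, the buffer is left unmodified and the
     * request still succeeds; callers can detect this by comparing
     * crp_olen against the original payload length.
     */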
    data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
    if (data == NULL)
        return (EINVAL);
    crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
        data);

    if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
        result = cxf->compress(data, crp->crp_payload_length, &out);
    else
        result = cxf->decompress(data, crp->crp_payload_length, &out);

    free(data, M_CRYPTO_DATA);
    if (result == 0)
        return (EINVAL);
    crp->crp_olen = result;

    /* Check the compressed size when doing compression */
    if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
        if (result >= crp->crp_payload_length) {
            /* Compression was useless, we lost time */
            free(out, M_CRYPTO_DATA);
            return (0);
        }
    }

    /*
     * Copy back the (de)compressed data.  m_copyback extends the
     * mbuf as necessary.
     */
    crypto_copyback(crp, crp->crp_payload_start, result, out);
    if (result < crp->crp_payload_length) {
        switch (crp->crp_buf.cb_type) {
        case CRYPTO_BUF_MBUF:
            adj = result - crp->crp_payload_length;
            m_adj(crp->crp_buf.cb_mbuf, adj);
            break;
        case CRYPTO_BUF_UIO: {
            struct uio *uio = crp->crp_buf.cb_uio;
            int ind;

            adj = crp->crp_payload_length - result;
            ind = uio->uio_iovcnt - 1;

            while (adj > 0 && ind >= 0) {
                if (adj < uio->uio_iov[ind].iov_len) {
                    uio->uio_iov[ind].iov_len -= adj;
                    break;
                }

                adj -= uio->uio_iov[ind].iov_len;
                uio->uio_iov[ind].iov_len = 0;
                ind--;
                uio->uio_iovcnt--;
            }
            break;
        }
        case CRYPTO_BUF_VMPAGE:
            adj = crp->crp_payload_length - result;
            crp->crp_buf.cb_vm_page_len -= adj;
            break;
        default:
            break;
        }
    }
    free(out, M_CRYPTO_DATA);
    return (0);
}

static int
swcr_setup_cipher(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
    struct swcr_encdec *swe;
    struct enc_xform *txf;
    int error;

    swe = &ses->swcr_encdec;
    txf = crypto_cipher(csp);
    MPASS(txf->ivsize == csp->csp_ivlen);
    if (txf->ctxsize != 0) {
        swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA,
            M_NOWAIT);
        if (swe->sw_kschedule == NULL)
            return (ENOMEM);
    }
    if (csp->csp_cipher_key != NULL) {
        error = txf->setkey(swe->sw_kschedule,
            csp->csp_cipher_key, csp->csp_cipher_klen);
        if (error)
            return (error);
    }
    swe->sw_exf = txf;
    return (0);
}

static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
    struct swcr_auth *swa;
    struct auth_hash *axf;

    swa = &ses->swcr_auth;

    axf = crypto_auth_hash(csp);
    swa->sw_axf = axf;
    if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
        return (EINVAL);
    if (csp->csp_auth_mlen == 0)
        swa->sw_mlen = axf->hashsize;
    else
        swa->sw_mlen = csp->csp_auth_mlen;
    swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
    if (swa->sw_ictx == NULL)
        return (ENOBUFS);

    switch (csp->csp_auth_alg) {
    case CRYPTO_SHA1_HMAC:
    case CRYPTO_SHA2_224_HMAC:
    case CRYPTO_SHA2_256_HMAC:
    case CRYPTO_SHA2_384_HMAC:
    case CRYPTO_SHA2_512_HMAC:
    case CRYPTO_NULL_HMAC:
    case CRYPTO_RIPEMD160_HMAC:
        swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
            M_NOWAIT);
        if (swa->sw_octx == NULL)
            return (ENOBUFS);
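        /*
         * Keying may be deferred: when no key accompanies the
         * session, swcr_authcompute() prepares the contexts from
         * the per-request crp_auth_key instead.
         */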
        if (csp->csp_auth_key != NULL) {
            swcr_authprepare(axf, swa, csp->csp_auth_key,
                csp->csp_auth_klen);
        }

        if (csp->csp_mode == CSP_MODE_DIGEST)
            ses->swcr_process = swcr_authcompute;
        break;
    case CRYPTO_SHA1:
    case CRYPTO_SHA2_224:
    case CRYPTO_SHA2_256:
    case CRYPTO_SHA2_384:
    case CRYPTO_SHA2_512:
        axf->Init(swa->sw_ictx);
        if (csp->csp_mode == CSP_MODE_DIGEST)
            ses->swcr_process = swcr_authcompute;
        break;
    case CRYPTO_AES_NIST_GMAC:
        axf->Init(swa->sw_ictx);
        axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
            csp->csp_auth_klen);
        if (csp->csp_mode == CSP_MODE_DIGEST)
            ses->swcr_process = swcr_gmac;
        break;
    case CRYPTO_POLY1305:
    case CRYPTO_BLAKE2B:
    case CRYPTO_BLAKE2S:
        /*
         * Blake2b and Blake2s support an optional key but do
         * not require one.
         */
        if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
            axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
                csp->csp_auth_klen);
        axf->Init(swa->sw_ictx);
        if (csp->csp_mode == CSP_MODE_DIGEST)
            ses->swcr_process = swcr_authcompute;
        break;
    case CRYPTO_AES_CCM_CBC_MAC:
        axf->Init(swa->sw_ictx);
        axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
            csp->csp_auth_klen);
        if (csp->csp_mode == CSP_MODE_DIGEST)
            ses->swcr_process = swcr_ccm_cbc_mac;
        break;
    }

    return (0);
}

static int
swcr_setup_gcm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
    struct swcr_auth *swa;
    struct auth_hash *axf;

    if (csp->csp_ivlen != AES_GCM_IV_LEN)
        return (EINVAL);

    /* First, setup the auth side. */
    swa = &ses->swcr_auth;
    switch (csp->csp_cipher_klen * 8) {
    case 128:
        axf = &auth_hash_nist_gmac_aes_128;
        break;
    case 192:
        axf = &auth_hash_nist_gmac_aes_192;
        break;
    case 256:
        axf = &auth_hash_nist_gmac_aes_256;
        break;
    default:
        return (EINVAL);
    }
    swa->sw_axf = axf;
    if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
        return (EINVAL);
    if (csp->csp_auth_mlen == 0)
        swa->sw_mlen = axf->hashsize;
    else
        swa->sw_mlen = csp->csp_auth_mlen;
    swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
    if (swa->sw_ictx == NULL)
        return (ENOBUFS);
    axf->Init(swa->sw_ictx);
    if (csp->csp_cipher_key != NULL)
        axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
            csp->csp_cipher_klen);

    /* Second, setup the cipher side. */
    return (swcr_setup_cipher(ses, csp));
}
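/*
 * A note on the keying above (a property of GCM rather than of this
 * driver): the GHASH subkey is derived from the cipher key, which is
 * why the auth transform is keyed with csp_cipher_key instead of a
 * separate auth key.  The CCM setup below follows the same pattern.
 */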
static int
swcr_setup_ccm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
    struct swcr_auth *swa;
    struct auth_hash *axf;

    if (csp->csp_ivlen != AES_CCM_IV_LEN)
        return (EINVAL);

    /* First, setup the auth side. */
    swa = &ses->swcr_auth;
    switch (csp->csp_cipher_klen * 8) {
    case 128:
        axf = &auth_hash_ccm_cbc_mac_128;
        break;
    case 192:
        axf = &auth_hash_ccm_cbc_mac_192;
        break;
    case 256:
        axf = &auth_hash_ccm_cbc_mac_256;
        break;
    default:
        return (EINVAL);
    }
    swa->sw_axf = axf;
    if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
        return (EINVAL);
    if (csp->csp_auth_mlen == 0)
        swa->sw_mlen = axf->hashsize;
    else
        swa->sw_mlen = csp->csp_auth_mlen;
    swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
    if (swa->sw_ictx == NULL)
        return (ENOBUFS);
    axf->Init(swa->sw_ictx);
    if (csp->csp_cipher_key != NULL)
        axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
            csp->csp_cipher_klen);

    /* Second, setup the cipher side. */
    return (swcr_setup_cipher(ses, csp));
}

static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
    struct auth_hash *axf;

    axf = crypto_auth_hash(csp);
    if (axf == NULL)
        return (false);
    switch (csp->csp_auth_alg) {
    case CRYPTO_SHA1_HMAC:
    case CRYPTO_SHA2_224_HMAC:
    case CRYPTO_SHA2_256_HMAC:
    case CRYPTO_SHA2_384_HMAC:
    case CRYPTO_SHA2_512_HMAC:
    case CRYPTO_NULL_HMAC:
    case CRYPTO_RIPEMD160_HMAC:
        break;
    case CRYPTO_AES_NIST_GMAC:
        switch (csp->csp_auth_klen * 8) {
        case 128:
        case 192:
        case 256:
            break;
        default:
            return (false);
        }
        if (csp->csp_auth_key == NULL)
            return (false);
        if (csp->csp_ivlen != AES_GCM_IV_LEN)
            return (false);
        break;
    case CRYPTO_POLY1305:
        if (csp->csp_auth_klen != POLY1305_KEY_LEN)
            return (false);
        break;
    case CRYPTO_AES_CCM_CBC_MAC:
        switch (csp->csp_auth_klen * 8) {
        case 128:
        case 192:
        case 256:
            break;
        default:
            return (false);
        }
        if (csp->csp_auth_key == NULL)
            return (false);
        if (csp->csp_ivlen != AES_CCM_IV_LEN)
            return (false);
        break;
    }
    return (true);
}

static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
    struct enc_xform *txf;

    txf = crypto_cipher(csp);
    if (txf == NULL)
        return (false);
    if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
        txf->ivsize != csp->csp_ivlen)
        return (false);
    return (true);
}

static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{

    if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) !=
        0)
        return (EINVAL);
    switch (csp->csp_mode) {
    case CSP_MODE_COMPRESS:
        switch (csp->csp_cipher_alg) {
        case CRYPTO_DEFLATE_COMP:
            break;
        default:
            return (EINVAL);
        }
        break;
    case CSP_MODE_CIPHER:
        switch (csp->csp_cipher_alg) {
        case CRYPTO_AES_NIST_GCM_16:
        case CRYPTO_AES_CCM_16:
            return (EINVAL);
        default:
            if (!swcr_cipher_supported(csp))
                return (EINVAL);
            break;
        }
        break;
    case CSP_MODE_DIGEST:
        if (!swcr_auth_supported(csp))
            return (EINVAL);
        break;
    case CSP_MODE_AEAD:
        switch (csp->csp_cipher_alg) {
        case CRYPTO_AES_NIST_GCM_16:
        case CRYPTO_AES_CCM_16:
            break;
        default:
            return (EINVAL);
        }
        break;
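    /*
     * AEAD ciphers are rejected for EtA below since they already
     * compute their own tag over the AAD and ciphertext; pairing
     * them with a separate auth transform is not meaningful here.
     */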
    case CSP_MODE_ETA:
        /* AEAD algorithms cannot be used for EtA. */
        switch (csp->csp_cipher_alg) {
        case CRYPTO_AES_NIST_GCM_16:
        case CRYPTO_AES_CCM_16:
            return (EINVAL);
        }
        switch (csp->csp_auth_alg) {
        case CRYPTO_AES_NIST_GMAC:
        case CRYPTO_AES_CCM_CBC_MAC:
            return (EINVAL);
        }

        if (!swcr_cipher_supported(csp) ||
            !swcr_auth_supported(csp))
            return (EINVAL);
        break;
    default:
        return (EINVAL);
    }

    return (CRYPTODEV_PROBE_SOFTWARE);
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
    struct swcr_session *ses;
    struct swcr_encdec *swe;
    struct swcr_auth *swa;
    struct comp_algo *cxf;
    int error;

    ses = crypto_get_driver_session(cses);
    mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);

    error = 0;
    swe = &ses->swcr_encdec;
    swa = &ses->swcr_auth;
    switch (csp->csp_mode) {
    case CSP_MODE_COMPRESS:
        switch (csp->csp_cipher_alg) {
        case CRYPTO_DEFLATE_COMP:
            cxf = &comp_algo_deflate;
            break;
#ifdef INVARIANTS
        default:
            panic("bad compression algo");
#endif
        }
        ses->swcr_compdec.sw_cxf = cxf;
        ses->swcr_process = swcr_compdec;
        break;
    case CSP_MODE_CIPHER:
        switch (csp->csp_cipher_alg) {
        case CRYPTO_NULL_CBC:
            ses->swcr_process = swcr_null;
            break;
#ifdef INVARIANTS
        case CRYPTO_AES_NIST_GCM_16:
        case CRYPTO_AES_CCM_16:
            panic("bad cipher algo");
#endif
        default:
            error = swcr_setup_cipher(ses, csp);
            if (error == 0)
                ses->swcr_process = swcr_encdec;
        }
        break;
    case CSP_MODE_DIGEST:
        error = swcr_setup_auth(ses, csp);
        break;
    case CSP_MODE_AEAD:
        switch (csp->csp_cipher_alg) {
        case CRYPTO_AES_NIST_GCM_16:
            error = swcr_setup_gcm(ses, csp);
            if (error == 0)
                ses->swcr_process = swcr_gcm;
            break;
        case CRYPTO_AES_CCM_16:
            error = swcr_setup_ccm(ses, csp);
            if (error == 0)
                ses->swcr_process = swcr_ccm;
            break;
#ifdef INVARIANTS
        default:
            panic("bad aead algo");
#endif
        }
        break;
    case CSP_MODE_ETA:
#ifdef INVARIANTS
        switch (csp->csp_cipher_alg) {
        case CRYPTO_AES_NIST_GCM_16:
        case CRYPTO_AES_CCM_16:
            panic("bad eta cipher algo");
        }
        switch (csp->csp_auth_alg) {
        case CRYPTO_AES_NIST_GMAC:
        case CRYPTO_AES_CCM_CBC_MAC:
            panic("bad eta auth algo");
        }
#endif

        error = swcr_setup_auth(ses, csp);
        if (error)
            break;
        if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
            /* Effectively degrade to digest mode. */
            ses->swcr_process = swcr_authcompute;
            break;
        }

        error = swcr_setup_cipher(ses, csp);
        if (error == 0)
            ses->swcr_process = swcr_eta;
        break;
    default:
        error = EINVAL;
    }

    if (error)
        swcr_freesession(dev, cses);
    return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
    struct swcr_session *ses;

    ses = crypto_get_driver_session(cses);

    mtx_destroy(&ses->swcr_lock);

    zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA);
    zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA);
    zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA);
}

/*
 * Process a software request.
 */
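/*
 * The per-session mutex taken below serializes requests on a
 * session: per-request rekeying via crp_cipher_key/crp_auth_key
 * rewrites the stored key schedule and hash contexts, which
 * concurrent requests would otherwise race on.
 */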
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
    struct swcr_session *ses;

    ses = crypto_get_driver_session(crp->crp_session);
    mtx_lock(&ses->swcr_lock);

    crp->crp_etype = ses->swcr_process(ses, crp);

    mtx_unlock(&ses->swcr_lock);
    crypto_done(crp);
    return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
    /* NB: order 10 is so we get attached after h/w devices */
    if (device_find_child(parent, "cryptosoft", -1) == NULL &&
        BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
        panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
    device_set_desc(dev, "software crypto");
    return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

    swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
        CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
    if (swcr_id < 0) {
        device_printf(dev, "cannot initialize!\n");
        return (ENXIO);
    }

    return (0);
}

static int
swcr_detach(device_t dev)
{
    crypto_unregister_all(swcr_id);
    return (0);
}

static device_method_t swcr_methods[] = {
    DEVMETHOD(device_identify, swcr_identify),
    DEVMETHOD(device_probe, swcr_probe),
    DEVMETHOD(device_attach, swcr_attach),
    DEVMETHOD(device_detach, swcr_detach),

    DEVMETHOD(cryptodev_probesession, swcr_probesession),
    DEVMETHOD(cryptodev_newsession, swcr_newsession),
    DEVMETHOD(cryptodev_freesession, swcr_freesession),
    DEVMETHOD(cryptodev_process, swcr_process),

    {0, 0},
};

static driver_t swcr_driver = {
    "cryptosoft",
    swcr_methods,
    0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);