/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014-2021 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/endian.h>
#include <sys/limits.h>

#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

struct swcr_auth {
	void		*sw_ictx;
	void		*sw_octx;
	const struct auth_hash *sw_axf;
	uint16_t	sw_mlen;
	bool		sw_hmac;
};

struct swcr_encdec {
	void		*sw_ctx;
	const struct enc_xform *sw_exf;
};

struct swcr_compdec {
	const struct comp_algo *sw_cxf;
};

struct swcr_session {
	int	(*swcr_process)(const struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};

static	int32_t swcr_id;

static	void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(const struct swcr_session *ses, struct cryptop *crp)
{

	return (0);
}
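
/*
 * Note on buffer handling in the transforms below: requests arrive as
 * mbuf chains, uio vectors, vm page arrays, or flat buffers, so the
 * cipher loops walk the payload with crypto buffer cursors.  Whenever
 * a cipher block straddles a segment boundary, the block is bounced
 * through an on-stack buffer ('blk'); otherwise the transform reads
 * and writes the segments in place.
 */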

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(const struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char blk[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	const struct enc_xform *exf;
	const struct swcr_encdec *sw;
	void *ctx;
	size_t inlen, outlen;
	int blks, resid;
	struct crypto_buffer_cursor cc_in, cc_out;
	const unsigned char *inblk;
	unsigned char *outblk;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	csp = crypto_get_params(crp->crp_session);

	if (exf->native_blocksize == 0) {
		/* Check for non-padded data */
		if ((crp->crp_payload_length % exf->blocksize) != 0)
			return (EINVAL);

		blks = exf->blocksize;
	} else
		blks = exf->native_blocksize;

	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL) {
		error = exf->setkey(ctx, crp->crp_cipher_key,
		    csp->csp_cipher_klen);
		if (error)
			return (error);
	} else
		memcpy(ctx, sw->sw_ctx, exf->ctxsize);

	crypto_read_iv(crp, blk);
	exf->reinit(ctx, blk, csp->csp_ivlen);

	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	inblk = crypto_cursor_segment(&cc_in, &inlen);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	outblk = crypto_cursor_segment(&cc_out, &outlen);

	resid = crp->crp_payload_length;
	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

	/*
	 * Loop through encrypting blocks.  'inlen' is the remaining
	 * length of the current segment in the input buffer.
	 * 'outlen' is the remaining length of current segment in the
	 * output buffer.
	 */
	while (resid >= blks) {
		/*
		 * If the current block is not contained within the
		 * current input/output segment, use 'blk' as a local
		 * buffer.
		 */
		if (inlen < blks) {
			crypto_cursor_copydata(&cc_in, blks, blk);
			inblk = blk;
		}
		if (outlen < blks)
			outblk = blk;

		if (encrypting)
			exf->encrypt(ctx, inblk, outblk);
		else
			exf->decrypt(ctx, inblk, outblk);

		if (inlen < blks) {
			inblk = crypto_cursor_segment(&cc_in, &inlen);
		} else {
			crypto_cursor_advance(&cc_in, blks);
			inlen -= blks;
			inblk += blks;
		}

		if (outlen < blks) {
			crypto_cursor_copyback(&cc_out, blks, blk);
			outblk = crypto_cursor_segment(&cc_out, &outlen);
		} else {
			crypto_cursor_advance(&cc_out, blks);
			outlen -= blks;
			outblk += blks;
		}

		resid -= blks;
	}
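
	/*
	 * Only transforms that declare a native_blocksize (stream-like
	 * modes such as counter mode or ChaCha20) can reach here with a
	 * partial block; plain block ciphers were rejected above unless
	 * the payload was an exact multiple of the block size.  The
	 * tail is finished with the encrypt_last/decrypt_last hooks,
	 * which accept a short final block.
	 */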

	/* Handle trailing partial block for stream ciphers. */
	if (resid > 0) {
		KASSERT(exf->native_blocksize != 0,
		    ("%s: partial block of %d bytes for cipher %s",
		    __func__, resid, exf->name));
		KASSERT(resid < blks, ("%s: partial block too big", __func__));

		inblk = crypto_cursor_segment(&cc_in, &inlen);
		outblk = crypto_cursor_segment(&cc_out, &outlen);
		if (inlen < resid) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			inblk = blk;
		}
		if (outlen < resid)
			outblk = blk;
		if (encrypting)
			exf->encrypt_last(ctx, inblk, outblk, resid);
		else
			exf->decrypt_last(ctx, inblk, outblk, resid);
		if (outlen < resid)
			crypto_cursor_copyback(&cc_out, resid, blk);
	}

	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(blk, sizeof(blk));
	return (0);
}

/*
 * Compute or verify hash.
 */
static int
swcr_authcompute(const struct swcr_session *ses, struct cryptop *crp)
{
	u_char aalg[HASH_MAX_LEN];
	const struct crypto_session_params *csp;
	const struct swcr_auth *sw;
	const struct auth_hash *axf;
	union authctx ctx;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	csp = crypto_get_params(crp->crp_session);
	if (crp->crp_auth_key != NULL) {
		if (sw->sw_hmac) {
			hmac_init_ipad(axf, crp->crp_auth_key,
			    csp->csp_auth_klen, &ctx);
		} else {
			axf->Init(&ctx);
			axf->Setkey(&ctx, crp->crp_auth_key,
			    csp->csp_auth_klen);
		}
	} else
		memcpy(&ctx, sw->sw_ictx, axf->ctxsize);

	if (crp->crp_aad != NULL)
		err = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
	else
		err = crypto_apply(crp, crp->crp_aad_start,
		    crp->crp_aad_length, axf->Update, &ctx);
	if (err)
		goto out;

	if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
	    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		err = crypto_apply_buf(&crp->crp_obuf,
		    crp->crp_payload_output_start, crp->crp_payload_length,
		    axf->Update, &ctx);
	else
		err = crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length, axf->Update, &ctx);
	if (err)
		goto out;

	if (csp->csp_flags & CSP_F_ESN)
		axf->Update(&ctx, crp->crp_esn, 4);

	axf->Final(aalg, &ctx);
	if (sw->sw_hmac) {
		if (crp->crp_auth_key != NULL)
			hmac_init_opad(axf, crp->crp_auth_key,
			    csp->csp_auth_klen, &ctx);
		else
			memcpy(&ctx, sw->sw_octx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char uaalg[HASH_MAX_LEN];

		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
		if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
			err = EBADMSG;
		explicit_bzero(uaalg, sizeof(uaalg));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
	}
	explicit_bzero(aalg, sizeof(aalg));
out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (err);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */
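
/*
 * GMAC is GCM with an empty ciphertext: the data covered by the MAC is
 * fed to GHASH as if it were AAD, and the closing length block encodes
 * the bit length in the first (AAD) 64-bit big-endian field, leaving
 * the ciphertext length field zero.  The 32-bit store at
 * '(uint32_t *)blk + 1' below lands in the low half of that field,
 * which suffices because OCF request lengths fit in an int (see the
 * CTASSERTs above).
 */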

static int
swcr_gmac(const struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[GMAC_DIGEST_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc;
	const u_char *inblk;
	union authctx ctx;
	const struct swcr_auth *swa;
	const struct auth_hash *axf;
	uint32_t *blkp;
	size_t len;
	int blksz, error, ivlen, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;
	blksz = GMAC_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	if (crp->crp_auth_key != NULL) {
		axf->Init(&ctx);
		axf->Setkey(&ctx, crp->crp_auth_key,
		    crypto_get_params(crp->crp_session)->csp_auth_klen);
	} else
		memcpy(&ctx, swa->sw_ictx, axf->ctxsize);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	crypto_read_iv(crp, iv);

	axf->Reinit(&ctx, iv, ivlen);
	crypto_cursor_init(&cc, &crp->crp_buf);
	crypto_cursor_advance(&cc, crp->crp_payload_start);
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) {
		inblk = crypto_cursor_segment(&cc, &len);
		if (len >= blksz) {
			len = rounddown(MIN(len, resid), blksz);
			crypto_cursor_advance(&cc, len);
		} else {
			len = blksz;
			crypto_cursor_copydata(&cc, len, blk);
			inblk = blk;
		}
		axf->Update(&ctx, inblk, len);
	}
	if (resid > 0) {
		memset(blk, 0, blksz);
		crypto_cursor_copydata(&cc, resid, blk);
		axf->Update(&ctx, blk, blksz);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char tag2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);
		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
		explicit_bzero(tag2, sizeof(tag2));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

static int
swcr_gcm(const struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[GMAC_DIGEST_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	const struct swcr_auth *swa;
	const struct swcr_encdec *swe;
	const struct enc_xform *exf;
	void *ctx;
	uint32_t *blkp;
	size_t len;
	int blksz, error, ivlen, r, resid;

	swa = &ses->swcr_auth;
	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	blksz = GMAC_BLOCK_LEN;
	KASSERT(blksz == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	ivlen = AES_GCM_IV_LEN;

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL)
		exf->setkey(ctx, crp->crp_cipher_key,
		    crypto_get_params(crp->crp_session)->csp_cipher_klen);
	else
		memcpy(ctx, swe->sw_ctx, exf->ctxsize);
	exf->reinit(ctx, crp->crp_iv, ivlen);
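
	/*
	 * GHASH consumes the AAD in 16-byte blocks, so any trailing
	 * fragment is zero-padded to a full block before being passed
	 * to the update hook.  Separate-AAD requests hand us one
	 * contiguous buffer; otherwise the AAD is walked with a cursor
	 * just like the payload.
	 */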

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL) {
		len = rounddown(crp->crp_aad_length, blksz);
		if (len != 0)
			exf->update(ctx, crp->crp_aad, len);
		if (crp->crp_aad_length != len) {
			memset(blk, 0, blksz);
			memcpy(blk, (char *)crp->crp_aad + len,
			    crp->crp_aad_length - len);
			exf->update(ctx, blk, blksz);
		}
	} else {
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_aad_start);
		for (resid = crp->crp_aad_length; resid >= blksz;
		    resid -= len) {
			inblk = crypto_cursor_segment(&cc_in, &len);
			if (len >= blksz) {
				len = rounddown(MIN(len, resid), blksz);
				crypto_cursor_advance(&cc_in, len);
			} else {
				len = blksz;
				crypto_cursor_copydata(&cc_in, len, blk);
				inblk = blk;
			}
			exf->update(ctx, inblk, len);
		}
		if (resid > 0) {
			memset(blk, 0, blksz);
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->update(ctx, blk, blksz);
		}
	}

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		inblk = crypto_cursor_segment(&cc_in, &len);
		if (len < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else {
			crypto_cursor_advance(&cc_in, blksz);
		}
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->encrypt(ctx, inblk, outblk);
			exf->update(ctx, outblk, blksz);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			exf->update(ctx, inblk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
		exf->update(ctx, blk, resid);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	exf->update(ctx, blk, blksz);

	/* Finalize MAC */
	exf->final(tag, ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		    resid -= blksz) {
			inblk = crypto_cursor_segment(&cc_in, &len);
			if (len < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else
				crypto_cursor_advance(&cc_in, blksz);
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->decrypt(ctx, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));

	return (error);
}
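
/*
 * CCM's first CBC-MAC input block (B_0, RFC 3610 section 2.2) packs a
 * flags byte, the nonce, and the message length into 16 bytes:
 *
 *	flags = 64*Adata + 8*M' + L'	(M' = (tag_length - 2) / 2,
 *					 L' = L - 1 = 14 - nonce_length)
 *
 * For example, a 12-byte nonce, 16-byte tag, and non-empty AAD give
 * L = 3 and flags = 0x40 + (7 << 3) + 2 = 0x7a, leaving three trailing
 * bytes for the big-endian message length.
 */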

static void
build_ccm_b0(const char *nonce, u_int nonce_length, u_int aad_length,
    u_int data_length, u_int tag_length, uint8_t *b0)
{
	uint8_t *bp;
	uint8_t flags, L;

	KASSERT(nonce_length >= 7 && nonce_length <= 13,
	    ("nonce_length must be between 7 and 13 bytes"));

	/*
	 * Need to determine the L field value.  This is the number of
	 * bytes needed to specify the length of the message; the length
	 * is whatever is left in the 16 bytes after specifying flags and
	 * the nonce.
	 */
	L = 15 - nonce_length;

	flags = ((aad_length > 0) << 6) +
	    (((tag_length - 2) / 2) << 3) +
	    L - 1;

	/*
	 * Now we need to set up the first block, which has flags, nonce,
	 * and the message length.
	 */
	b0[0] = flags;
	memcpy(b0 + 1, nonce, nonce_length);
	bp = b0 + 1 + nonce_length;

	/* Copy data_length into the trailing L bytes, big-endian. */
	for (uint8_t *dst = b0 + CCM_CBC_BLOCK_LEN - 1; dst >= bp; dst--) {
		*dst = data_length;
		data_length >>= 8;
	}
}

/* NB: OCF only supports AAD lengths < 2^32. */
static int
build_ccm_aad_length(u_int aad_length, uint8_t *blk)
{
	if (aad_length < ((1 << 16) - (1 << 8))) {
		be16enc(blk, aad_length);
		return (sizeof(uint16_t));
	} else {
		blk[0] = 0xff;
		blk[1] = 0xfe;
		be32enc(blk + 2, aad_length);
		return (2 + sizeof(uint32_t));
	}
}

static int
swcr_ccm_cbc_mac(const struct swcr_session *ses, struct cryptop *crp)
{
	u_char iv[AES_BLOCK_LEN];
	u_char blk[CCM_CBC_BLOCK_LEN];
	u_char tag[AES_CBC_MAC_HASH_LEN];
	union authctx ctx;
	const struct crypto_session_params *csp;
	const struct swcr_auth *swa;
	const struct auth_hash *axf;
	int error, ivlen, len;

	csp = crypto_get_params(crp->crp_session);
	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	if (crp->crp_auth_key != NULL) {
		axf->Init(&ctx);
		axf->Setkey(&ctx, crp->crp_auth_key, csp->csp_auth_klen);
	} else
		memcpy(&ctx, swa->sw_ictx, axf->ctxsize);

	/* Initialize the IV */
	ivlen = csp->csp_ivlen;
	crypto_read_iv(crp, iv);

	/* Supply MAC with IV */
	axf->Reinit(&ctx, crp->crp_iv, ivlen);
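
	/*
	 * A digest-only CBC-MAC session has no ciphertext, so the
	 * payload is authenticated as if it were the AAD: B_0 is built
	 * with a data length of zero, and the payload length is encoded
	 * in the AAD length field that follows it.
	 */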

	/* Supply MAC with b0. */
	build_ccm_b0(crp->crp_iv, ivlen, crp->crp_payload_length, 0,
	    swa->sw_mlen, blk);
	axf->Update(&ctx, blk, CCM_CBC_BLOCK_LEN);

	len = build_ccm_aad_length(crp->crp_payload_length, blk);
	axf->Update(&ctx, blk, len);

	crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
	    axf->Update, &ctx);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char tag2[AES_CBC_MAC_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);
		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
		explicit_bzero(tag2, sizeof(tag2));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(blk, sizeof(blk));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

static int
swcr_ccm(const struct swcr_session *ses, struct cryptop *crp)
{
	const struct crypto_session_params *csp;
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[AES_CBC_MAC_HASH_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	const struct swcr_auth *swa;
	const struct swcr_encdec *swe;
	const struct enc_xform *exf;
	void *ctx;
	size_t len;
	int blksz, error, ivlen, r, resid;

	csp = crypto_get_params(crp->crp_session);
	swa = &ses->swcr_auth;
	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	blksz = AES_BLOCK_LEN;
	KASSERT(blksz == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if (crp->crp_payload_length > ccm_max_payload_length(csp))
		return (EMSGSIZE);

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	ivlen = csp->csp_ivlen;

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL)
		exf->setkey(ctx, crp->crp_cipher_key,
		    crypto_get_params(crp->crp_session)->csp_cipher_klen);
	else
		memcpy(ctx, swe->sw_ctx, exf->ctxsize);
	exf->reinit(ctx, crp->crp_iv, ivlen);

	/* Supply MAC with b0. */
	_Static_assert(sizeof(blkbuf) >= CCM_CBC_BLOCK_LEN,
	    "blkbuf too small for b0");
	build_ccm_b0(crp->crp_iv, ivlen, crp->crp_aad_length,
	    crp->crp_payload_length, swa->sw_mlen, blk);
	exf->update(ctx, blk, CCM_CBC_BLOCK_LEN);
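
	/*
	 * Per RFC 3610, the AAD is prefixed with its encoded length
	 * (two bytes for short AAD; 0xff 0xfe plus four bytes beyond
	 * that, as produced by build_ccm_aad_length() above), and the
	 * whole run, length field included, is zero-padded out to a
	 * 16-byte CBC-MAC block boundary.
	 */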

	/* Supply MAC with AAD */
	if (crp->crp_aad_length != 0) {
		len = build_ccm_aad_length(crp->crp_aad_length, blk);
		exf->update(ctx, blk, len);
		if (crp->crp_aad != NULL)
			exf->update(ctx, crp->crp_aad, crp->crp_aad_length);
		else
			crypto_apply(crp, crp->crp_aad_start,
			    crp->crp_aad_length, exf->update, ctx);

		/* Pad the AAD (including length field) to a full block. */
		len = (len + crp->crp_aad_length) % CCM_CBC_BLOCK_LEN;
		if (len != 0) {
			len = CCM_CBC_BLOCK_LEN - len;
			memset(blk, 0, CCM_CBC_BLOCK_LEN);
			exf->update(ctx, blk, len);
		}
	}

	/* Do encryption/decryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		inblk = crypto_cursor_segment(&cc_in, &len);
		if (len < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else
			crypto_cursor_advance(&cc_in, blksz);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->update(ctx, inblk, blksz);
			exf->encrypt(ctx, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unencrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			exf->decrypt(ctx, inblk, blk);
			exf->update(ctx, blk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->update(ctx, blk, resid);
			exf->encrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		} else {
			exf->decrypt_last(ctx, blk, blk, resid);
			exf->update(ctx, blk, resid);
		}
	}

	/* Finalize MAC */
	exf->final(tag, ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[AES_CBC_MAC_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		exf->reinit(ctx, crp->crp_iv, ivlen);
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		    resid -= blksz) {
			inblk = crypto_cursor_segment(&cc_in, &len);
			if (len < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else
				crypto_cursor_advance(&cc_in, blksz);
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->decrypt(ctx, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	return (error);
}
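
/*
 * ChaCha20-Poly1305 follows the RFC 8439 AEAD construction: the AAD
 * and the ciphertext are each zero-padded to a 16-byte Poly1305 block
 * ("padding1" and "padding2" below), then a final block carrying the
 * two lengths as 64-bit little-endian values is absorbed before the
 * tag is produced.
 */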

static int
swcr_chacha20_poly1305(const struct swcr_session *ses, struct cryptop *crp)
{
	const struct crypto_session_params *csp;
	uint64_t blkbuf[howmany(CHACHA20_NATIVE_BLOCK_LEN, sizeof(uint64_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[POLY1305_HASH_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	uint64_t *blkp;
	const struct swcr_auth *swa;
	const struct swcr_encdec *swe;
	const struct enc_xform *exf;
	void *ctx;
	size_t len;
	int blksz, error, r, resid;

	swa = &ses->swcr_auth;
	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	blksz = exf->native_blocksize;
	KASSERT(blksz <= sizeof(blkbuf), ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	csp = crypto_get_params(crp->crp_session);

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL)
		exf->setkey(ctx, crp->crp_cipher_key,
		    csp->csp_cipher_klen);
	else
		memcpy(ctx, swe->sw_ctx, exf->ctxsize);
	exf->reinit(ctx, crp->crp_iv, csp->csp_ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL)
		exf->update(ctx, crp->crp_aad, crp->crp_aad_length);
	else
		crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
		    exf->update, ctx);
	if (crp->crp_aad_length % 16 != 0) {
		/* padding1 */
		memset(blk, 0, 16);
		exf->update(ctx, blk, 16 - crp->crp_aad_length % 16);
	}

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		inblk = crypto_cursor_segment(&cc_in, &len);
		if (len < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else
			crypto_cursor_advance(&cc_in, blksz);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->encrypt(ctx, inblk, outblk);
			exf->update(ctx, outblk, blksz);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			exf->update(ctx, inblk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
		exf->update(ctx, blk, resid);
		if (resid % 16 != 0) {
			/* padding2 */
			memset(blk, 0, 16);
			exf->update(ctx, blk, 16 - resid % 16);
		}
	}

	/* lengths */
	blkp = (uint64_t *)blk;
	blkp[0] = htole64(crp->crp_aad_length);
	blkp[1] = htole64(crp->crp_payload_length);
	exf->update(ctx, blk, sizeof(uint64_t) * 2);

	/* Finalize MAC */
	exf->final(tag, ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[POLY1305_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}
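
		/*
		 * Decryption is deliberately deferred until after the
		 * tag check: the first pass above only ran the MAC over
		 * the ciphertext, so no plaintext is written out for a
		 * request whose tag fails to verify.
		 */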

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		    resid -= blksz) {
			inblk = crypto_cursor_segment(&cc_in, &len);
			if (len < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else
				crypto_cursor_advance(&cc_in, blksz);
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->decrypt(ctx, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	return (error);
}

/*
 * Apply a cipher and a digest to perform EtA.
 */
static int
swcr_eta(const struct swcr_session *ses, struct cryptop *crp)
{
	int error;

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		error = swcr_encdec(ses, crp);
		if (error == 0)
			error = swcr_authcompute(ses, crp);
	} else {
		error = swcr_authcompute(ses, crp);
		if (error == 0)
			error = swcr_encdec(ses, crp);
	}
	return (error);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(const struct swcr_session *ses, struct cryptop *crp)
{
	const struct comp_algo *cxf;
	uint8_t *data, *out;
	int adj;
	uint32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/*
	 * The (de)compression routines need the whole payload as one
	 * contiguous buffer, so if the data is not contiguous in the
	 * request buffer, copy it into a temporary buffer first.
	 */
	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}
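
	/*
	 * When the output is shorter than the input, the request buffer
	 * must also be shrunk so its length matches the result.  How
	 * that is done depends on the buffer type handled in the switch
	 * below: trimming the mbuf chain, shortening the tail iovecs,
	 * or reducing the page array length.
	 */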

	/*
	 * Copy back the (de)compressed data.  m_copyback extends the
	 * mbuf as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
	if (result < crp->crp_payload_length) {
		switch (crp->crp_buf.cb_type) {
		case CRYPTO_BUF_MBUF:
		case CRYPTO_BUF_SINGLE_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_buf.cb_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_buf.cb_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
			break;
		case CRYPTO_BUF_VMPAGE:
			adj = crp->crp_payload_length - result;
			crp->crp_buf.cb_vm_page_len -= adj;
			break;
		default:
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return (0);
}

static int
swcr_setup_cipher(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	const struct enc_xform *txf;
	int error;

	swe = &ses->swcr_encdec;
	txf = crypto_cipher(csp);
	if (csp->csp_cipher_key != NULL) {
		if (txf->ctxsize != 0) {
			swe->sw_ctx = malloc(txf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swe->sw_ctx == NULL)
				return (ENOMEM);
		}
		error = txf->setkey(swe->sw_ctx,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;
	return (0);
}
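
/*
 * Keys may be supplied either at session creation (csp_auth_key /
 * csp_cipher_key) or per-request (crp_auth_key / crp_cipher_key).
 * When a session key is given, as much state as possible is
 * precomputed here, e.g. the HMAC inner and outer contexts, so that
 * per-request processing only has to memcpy a ready-made context.
 */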

static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	const struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL) {
		swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_ictx == NULL)
			return (ENOBUFS);
	}

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		swa->sw_hmac = true;
		if (csp->csp_auth_key != NULL) {
			swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swa->sw_octx == NULL)
				return (ENOBUFS);
			hmac_init_ipad(axf, csp->csp_auth_key,
			    csp->csp_auth_klen, swa->sw_ictx);
			hmac_init_opad(axf, csp->csp_auth_key,
			    csp->csp_auth_klen, swa->sw_octx);
		}
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
	case CRYPTO_NULL_HMAC:
		axf->Init(swa->sw_ictx);
		break;
	case CRYPTO_AES_NIST_GMAC:
	case CRYPTO_AES_CCM_CBC_MAC:
	case CRYPTO_POLY1305:
		if (csp->csp_auth_key != NULL) {
			axf->Init(swa->sw_ictx);
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}
		break;
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0)
			axf->Init(swa->sw_ictx);
		else if (csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		break;
	}

	if (csp->csp_mode == CSP_MODE_DIGEST) {
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
			ses->swcr_process = swcr_gmac;
			break;
		case CRYPTO_AES_CCM_CBC_MAC:
			ses->swcr_process = swcr_ccm_cbc_mac;
			break;
		default:
			ses->swcr_process = swcr_authcompute;
		}
	}

	return (0);
}

static int
swcr_setup_aead(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	int error;

	error = swcr_setup_cipher(ses, csp);
	if (error)
		return (error);

	swa = &ses->swcr_auth;
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = ses->swcr_encdec.sw_exf->macsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	return (0);
}

static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
	const struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	if (axf == NULL)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		break;
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_GCM_IV_LEN)
			return (false);
		break;
	case CRYPTO_POLY1305:
		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
			return (false);
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		break;
	}
	return (true);
}

static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
	const struct enc_xform *txf;

	txf = crypto_cipher(csp);
	if (txf == NULL)
		return (false);
	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
	    txf->ivsize != csp->csp_ivlen)
		return (false);
	return (true);
}

#define	SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)

static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{
	if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			return (EINVAL);
		default:
			if (!swcr_cipher_supported(csp))
				return (EINVAL);
			break;
		}
		break;
	case CSP_MODE_DIGEST:
		if (!swcr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			switch (csp->csp_cipher_klen * 8) {
			case 128:
			case 192:
			case 256:
				break;
			default:
				return (EINVAL);
			}
			break;
		case CRYPTO_CHACHA20_POLY1305:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		/* AEAD algorithms cannot be used for EtA. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			return (EINVAL);
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			return (EINVAL);
		}

		if (!swcr_cipher_supported(csp) ||
		    !swcr_auth_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_SOFTWARE);
}
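
/*
 * Probe and session setup are split: swcr_probesession() above only
 * validates the parameters and bids CRYPTODEV_PROBE_SOFTWARE (so a
 * hardware driver, if present, is preferred for the session), while
 * swcr_newsession() below allocates per-session state and picks the
 * swcr_process handler invoked for each request.
 */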

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	const struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);

	error = 0;
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_cipher(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_aead(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_aead(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
		case CRYPTO_CHACHA20_POLY1305:
			error = swcr_setup_aead(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_chacha20_poly1305;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_cipher(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	if (error)
		swcr_freesession(dev, cses);
	return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(cses);

	zfree(ses->swcr_encdec.sw_ctx, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA);
}
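
/*
 * The driver registers with CRYPTOCAP_F_SYNC (see swcr_attach()), so
 * requests complete synchronously: the session handler runs in the
 * caller's context and crypto_done() is invoked before swcr_process()
 * returns.
 */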

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);

	crp->crp_etype = ses->swcr_process(ses, crp);

	crypto_done(crp);
	return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	device_quiet(dev);
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
	    CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return (ENXIO);
	}

	return (0);
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	return (0);
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_probesession, swcr_probesession),
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);

/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);