/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Netflix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/counter.h> 35 #include <sys/endian.h> 36 #include <sys/kernel.h> 37 #include <sys/ktls.h> 38 #include <sys/lock.h> 39 #include <sys/malloc.h> 40 #include <sys/module.h> 41 #include <sys/mutex.h> 42 #include <sys/sysctl.h> 43 #include <sys/uio.h> 44 #include <opencrypto/cryptodev.h> 45 46 struct ocf_session { 47 crypto_session_t sid; 48 crypto_session_t mac_sid; 49 int mac_len; 50 struct mtx lock; 51 bool implicit_iv; 52 53 /* Only used for TLS 1.0 with the implicit IV. */ 54 #ifdef INVARIANTS 55 bool in_progress; 56 uint64_t next_seqno; 57 #endif 58 char iv[AES_BLOCK_LEN]; 59 }; 60 61 struct ocf_operation { 62 struct ocf_session *os; 63 bool done; 64 }; 65 66 static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS"); 67 68 SYSCTL_DECL(_kern_ipc_tls); 69 SYSCTL_DECL(_kern_ipc_tls_stats); 70 71 static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf, 72 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 73 "Kernel TLS offload via OCF stats"); 74 75 static COUNTER_U64_DEFINE_EARLY(ocf_tls10_cbc_crypts); 76 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_crypts, 77 CTLFLAG_RD, &ocf_tls10_cbc_crypts, 78 "Total number of OCF TLS 1.0 CBC encryption operations"); 79 80 static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_crypts); 81 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_crypts, 82 CTLFLAG_RD, &ocf_tls11_cbc_crypts, 83 "Total number of OCF TLS 1.1/1.2 CBC encryption operations"); 84 85 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_crypts); 86 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_crypts, 87 CTLFLAG_RD, &ocf_tls12_gcm_crypts, 88 "Total number of OCF TLS 1.2 GCM encryption operations"); 89 90 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_crypts); 91 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_crypts, 92 CTLFLAG_RD, &ocf_tls13_gcm_crypts, 93 "Total number of OCF TLS 1.3 GCM encryption 
operations"); 94 95 static COUNTER_U64_DEFINE_EARLY(ocf_inplace); 96 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace, 97 CTLFLAG_RD, &ocf_inplace, 98 "Total number of OCF in-place operations"); 99 100 static COUNTER_U64_DEFINE_EARLY(ocf_separate_output); 101 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output, 102 CTLFLAG_RD, &ocf_separate_output, 103 "Total number of OCF operations with a separate output buffer"); 104 105 static COUNTER_U64_DEFINE_EARLY(ocf_retries); 106 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD, 107 &ocf_retries, 108 "Number of OCF encryption operation retries"); 109 110 static int 111 ktls_ocf_callback_sync(struct cryptop *crp __unused) 112 { 113 return (0); 114 } 115 116 static int 117 ktls_ocf_callback_async(struct cryptop *crp) 118 { 119 struct ocf_operation *oo; 120 121 oo = crp->crp_opaque; 122 mtx_lock(&oo->os->lock); 123 oo->done = true; 124 mtx_unlock(&oo->os->lock); 125 wakeup(oo); 126 return (0); 127 } 128 129 static int 130 ktls_ocf_dispatch(struct ocf_session *os, struct cryptop *crp) 131 { 132 struct ocf_operation oo; 133 int error; 134 bool async; 135 136 oo.os = os; 137 oo.done = false; 138 139 crp->crp_opaque = &oo; 140 for (;;) { 141 async = !CRYPTO_SESS_SYNC(crp->crp_session); 142 crp->crp_callback = async ? 
ktls_ocf_callback_async : 143 ktls_ocf_callback_sync; 144 145 error = crypto_dispatch(crp); 146 if (error) 147 break; 148 if (async) { 149 mtx_lock(&os->lock); 150 while (!oo.done) 151 mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0); 152 mtx_unlock(&os->lock); 153 } 154 155 if (crp->crp_etype != EAGAIN) { 156 error = crp->crp_etype; 157 break; 158 } 159 160 crp->crp_etype = 0; 161 crp->crp_flags &= ~CRYPTO_F_DONE; 162 oo.done = false; 163 counter_u64_add(ocf_retries, 1); 164 } 165 return (error); 166 } 167 168 static int 169 ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls, 170 const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov, 171 struct iovec *outiov, int iovcnt, uint64_t seqno, 172 uint8_t record_type __unused) 173 { 174 struct uio uio, out_uio; 175 struct tls_mac_data ad; 176 struct cryptop crp; 177 struct ocf_session *os; 178 struct iovec iov[iovcnt + 2]; 179 struct iovec out_iov[iovcnt + 1]; 180 int i, error; 181 uint16_t tls_comp_len; 182 uint8_t pad; 183 bool inplace; 184 185 os = tls->cipher; 186 187 #ifdef INVARIANTS 188 if (os->implicit_iv) { 189 mtx_lock(&os->lock); 190 KASSERT(!os->in_progress, 191 ("concurrent implicit IV encryptions")); 192 if (os->next_seqno != seqno) { 193 printf("KTLS CBC: TLS records out of order. " 194 "Expected %ju, got %ju\n", 195 (uintmax_t)os->next_seqno, (uintmax_t)seqno); 196 mtx_unlock(&os->lock); 197 return (EINVAL); 198 } 199 os->in_progress = true; 200 mtx_unlock(&os->lock); 201 } 202 #endif 203 204 /* 205 * Compute the payload length. 206 * 207 * XXX: This could be easily computed O(1) from the mbuf 208 * fields, but we don't have those accessible here. Can 209 * at least compute inplace as well while we are here. 210 */ 211 tls_comp_len = 0; 212 inplace = true; 213 for (i = 0; i < iovcnt; i++) { 214 tls_comp_len += iniov[i].iov_len; 215 if (iniov[i].iov_base != outiov[i].iov_base) 216 inplace = false; 217 } 218 219 /* Initialize the AAD. 
*/ 220 ad.seq = htobe64(seqno); 221 ad.type = hdr->tls_type; 222 ad.tls_vmajor = hdr->tls_vmajor; 223 ad.tls_vminor = hdr->tls_vminor; 224 ad.tls_length = htons(tls_comp_len); 225 226 /* First, compute the MAC. */ 227 iov[0].iov_base = &ad; 228 iov[0].iov_len = sizeof(ad); 229 memcpy(&iov[1], iniov, sizeof(*iniov) * iovcnt); 230 iov[iovcnt + 1].iov_base = trailer; 231 iov[iovcnt + 1].iov_len = os->mac_len; 232 uio.uio_iov = iov; 233 uio.uio_iovcnt = iovcnt + 2; 234 uio.uio_offset = 0; 235 uio.uio_segflg = UIO_SYSSPACE; 236 uio.uio_td = curthread; 237 uio.uio_resid = sizeof(ad) + tls_comp_len + os->mac_len; 238 239 crypto_initreq(&crp, os->mac_sid); 240 crp.crp_payload_start = 0; 241 crp.crp_payload_length = sizeof(ad) + tls_comp_len; 242 crp.crp_digest_start = crp.crp_payload_length; 243 crp.crp_op = CRYPTO_OP_COMPUTE_DIGEST; 244 crp.crp_flags = CRYPTO_F_CBIMM; 245 crypto_use_uio(&crp, &uio); 246 error = ktls_ocf_dispatch(os, &crp); 247 248 crypto_destroyreq(&crp); 249 if (error) { 250 #ifdef INVARIANTS 251 if (os->implicit_iv) { 252 mtx_lock(&os->lock); 253 os->in_progress = false; 254 mtx_unlock(&os->lock); 255 } 256 #endif 257 return (error); 258 } 259 260 /* Second, add the padding. */ 261 pad = (unsigned)(AES_BLOCK_LEN - (tls_comp_len + os->mac_len + 1)) % 262 AES_BLOCK_LEN; 263 for (i = 0; i < pad + 1; i++) 264 trailer[os->mac_len + i] = pad; 265 266 /* Finally, encrypt the record. */ 267 268 /* 269 * Don't recopy the input iovec, instead just adjust the 270 * trailer length and skip over the AAD vector in the uio. 
271 */ 272 iov[iovcnt + 1].iov_len += pad + 1; 273 uio.uio_iov = iov + 1; 274 uio.uio_iovcnt = iovcnt + 1; 275 uio.uio_resid = tls_comp_len + iov[iovcnt + 1].iov_len; 276 KASSERT(uio.uio_resid % AES_BLOCK_LEN == 0, 277 ("invalid encryption size")); 278 279 crypto_initreq(&crp, os->sid); 280 crp.crp_payload_start = 0; 281 crp.crp_payload_length = uio.uio_resid; 282 crp.crp_op = CRYPTO_OP_ENCRYPT; 283 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE; 284 if (os->implicit_iv) 285 memcpy(crp.crp_iv, os->iv, AES_BLOCK_LEN); 286 else 287 memcpy(crp.crp_iv, hdr + 1, AES_BLOCK_LEN); 288 crypto_use_uio(&crp, &uio); 289 if (!inplace) { 290 memcpy(out_iov, outiov, sizeof(*iniov) * iovcnt); 291 out_iov[iovcnt] = iov[iovcnt + 1]; 292 out_uio.uio_iov = out_iov; 293 out_uio.uio_iovcnt = iovcnt + 1; 294 out_uio.uio_offset = 0; 295 out_uio.uio_segflg = UIO_SYSSPACE; 296 out_uio.uio_td = curthread; 297 out_uio.uio_resid = uio.uio_resid; 298 crypto_use_output_uio(&crp, &out_uio); 299 } 300 301 if (os->implicit_iv) 302 counter_u64_add(ocf_tls10_cbc_crypts, 1); 303 else 304 counter_u64_add(ocf_tls11_cbc_crypts, 1); 305 if (inplace) 306 counter_u64_add(ocf_inplace, 1); 307 else 308 counter_u64_add(ocf_separate_output, 1); 309 error = ktls_ocf_dispatch(os, &crp); 310 311 crypto_destroyreq(&crp); 312 313 if (os->implicit_iv) { 314 KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN, 315 ("trailer too short to read IV")); 316 memcpy(os->iv, trailer + os->mac_len + pad + 1 - AES_BLOCK_LEN, 317 AES_BLOCK_LEN); 318 #ifdef INVARIANTS 319 mtx_lock(&os->lock); 320 os->next_seqno = seqno + 1; 321 os->in_progress = false; 322 mtx_unlock(&os->lock); 323 #endif 324 } 325 return (error); 326 } 327 328 static int 329 ktls_ocf_tls12_gcm_encrypt(struct ktls_session *tls, 330 const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov, 331 struct iovec *outiov, int iovcnt, uint64_t seqno, 332 uint8_t record_type __unused) 333 { 334 struct uio uio, out_uio, *tag_uio; 335 struct tls_aead_data 
ad; 336 struct cryptop crp; 337 struct ocf_session *os; 338 struct iovec iov[iovcnt + 1]; 339 int i, error; 340 uint16_t tls_comp_len; 341 bool inplace; 342 343 os = tls->cipher; 344 345 uio.uio_iov = iniov; 346 uio.uio_iovcnt = iovcnt; 347 uio.uio_offset = 0; 348 uio.uio_segflg = UIO_SYSSPACE; 349 uio.uio_td = curthread; 350 351 out_uio.uio_iov = outiov; 352 out_uio.uio_iovcnt = iovcnt; 353 out_uio.uio_offset = 0; 354 out_uio.uio_segflg = UIO_SYSSPACE; 355 out_uio.uio_td = curthread; 356 357 crypto_initreq(&crp, os->sid); 358 359 /* Setup the IV. */ 360 memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN); 361 memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t)); 362 363 /* Setup the AAD. */ 364 tls_comp_len = ntohs(hdr->tls_length) - 365 (AES_GMAC_HASH_LEN + sizeof(uint64_t)); 366 ad.seq = htobe64(seqno); 367 ad.type = hdr->tls_type; 368 ad.tls_vmajor = hdr->tls_vmajor; 369 ad.tls_vminor = hdr->tls_vminor; 370 ad.tls_length = htons(tls_comp_len); 371 crp.crp_aad = &ad; 372 crp.crp_aad_length = sizeof(ad); 373 374 /* Compute payload length and determine if encryption is in place. */ 375 inplace = true; 376 crp.crp_payload_start = 0; 377 for (i = 0; i < iovcnt; i++) { 378 if (iniov[i].iov_base != outiov[i].iov_base) 379 inplace = false; 380 crp.crp_payload_length += iniov[i].iov_len; 381 } 382 uio.uio_resid = crp.crp_payload_length; 383 out_uio.uio_resid = crp.crp_payload_length; 384 385 if (inplace) 386 tag_uio = &uio; 387 else 388 tag_uio = &out_uio; 389 390 /* Duplicate iovec and append vector for tag. 
*/ 391 memcpy(iov, tag_uio->uio_iov, iovcnt * sizeof(struct iovec)); 392 iov[iovcnt].iov_base = trailer; 393 iov[iovcnt].iov_len = AES_GMAC_HASH_LEN; 394 tag_uio->uio_iov = iov; 395 tag_uio->uio_iovcnt++; 396 crp.crp_digest_start = tag_uio->uio_resid; 397 tag_uio->uio_resid += AES_GMAC_HASH_LEN; 398 399 crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST; 400 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE; 401 crypto_use_uio(&crp, &uio); 402 if (!inplace) 403 crypto_use_output_uio(&crp, &out_uio); 404 405 counter_u64_add(ocf_tls12_gcm_crypts, 1); 406 if (inplace) 407 counter_u64_add(ocf_inplace, 1); 408 else 409 counter_u64_add(ocf_separate_output, 1); 410 error = ktls_ocf_dispatch(os, &crp); 411 412 crypto_destroyreq(&crp); 413 return (error); 414 } 415 416 static int 417 ktls_ocf_tls12_gcm_decrypt(struct ktls_session *tls, 418 const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno, 419 int *trailer_len) 420 { 421 struct tls_aead_data ad; 422 struct cryptop crp; 423 struct ocf_session *os; 424 struct ocf_operation oo; 425 int error; 426 uint16_t tls_comp_len; 427 428 os = tls->cipher; 429 430 oo.os = os; 431 oo.done = false; 432 433 crypto_initreq(&crp, os->sid); 434 435 /* Setup the IV. */ 436 memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN); 437 memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t)); 438 439 /* Setup the AAD. 
*/ 440 tls_comp_len = ntohs(hdr->tls_length) - 441 (AES_GMAC_HASH_LEN + sizeof(uint64_t)); 442 ad.seq = htobe64(seqno); 443 ad.type = hdr->tls_type; 444 ad.tls_vmajor = hdr->tls_vmajor; 445 ad.tls_vminor = hdr->tls_vminor; 446 ad.tls_length = htons(tls_comp_len); 447 crp.crp_aad = &ad; 448 crp.crp_aad_length = sizeof(ad); 449 450 crp.crp_payload_start = tls->params.tls_hlen; 451 crp.crp_payload_length = tls_comp_len; 452 crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length; 453 454 crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST; 455 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE; 456 crypto_use_mbuf(&crp, m); 457 458 counter_u64_add(ocf_tls12_gcm_crypts, 1); 459 error = ktls_ocf_dispatch(os, &crp); 460 461 crypto_destroyreq(&crp); 462 *trailer_len = AES_GMAC_HASH_LEN; 463 return (error); 464 } 465 466 static int 467 ktls_ocf_tls13_gcm_encrypt(struct ktls_session *tls, 468 const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov, 469 struct iovec *outiov, int iovcnt, uint64_t seqno, uint8_t record_type) 470 { 471 struct uio uio, out_uio; 472 struct tls_aead_data_13 ad; 473 char nonce[12]; 474 struct cryptop crp; 475 struct ocf_session *os; 476 struct iovec iov[iovcnt + 1], out_iov[iovcnt + 1]; 477 int i, error; 478 bool inplace; 479 480 os = tls->cipher; 481 482 crypto_initreq(&crp, os->sid); 483 484 /* Setup the nonce. */ 485 memcpy(nonce, tls->params.iv, tls->params.iv_len); 486 *(uint64_t *)(nonce + 4) ^= htobe64(seqno); 487 488 /* Setup the AAD. */ 489 ad.type = hdr->tls_type; 490 ad.tls_vmajor = hdr->tls_vmajor; 491 ad.tls_vminor = hdr->tls_vminor; 492 ad.tls_length = hdr->tls_length; 493 crp.crp_aad = &ad; 494 crp.crp_aad_length = sizeof(ad); 495 496 /* Compute payload length and determine if encryption is in place. 
*/ 497 inplace = true; 498 crp.crp_payload_start = 0; 499 for (i = 0; i < iovcnt; i++) { 500 if (iniov[i].iov_base != outiov[i].iov_base) 501 inplace = false; 502 crp.crp_payload_length += iniov[i].iov_len; 503 } 504 505 /* Store the record type as the first byte of the trailer. */ 506 trailer[0] = record_type; 507 crp.crp_payload_length++; 508 crp.crp_digest_start = crp.crp_payload_length; 509 510 /* 511 * Duplicate the input iov to append the trailer. Always 512 * include the full trailer as input to get the record_type 513 * even if only the first byte is used. 514 */ 515 memcpy(iov, iniov, iovcnt * sizeof(*iov)); 516 iov[iovcnt].iov_base = trailer; 517 iov[iovcnt].iov_len = AES_GMAC_HASH_LEN + 1; 518 uio.uio_iov = iov; 519 uio.uio_iovcnt = iovcnt + 1; 520 uio.uio_offset = 0; 521 uio.uio_resid = crp.crp_payload_length + AES_GMAC_HASH_LEN; 522 uio.uio_segflg = UIO_SYSSPACE; 523 uio.uio_td = curthread; 524 crypto_use_uio(&crp, &uio); 525 526 if (!inplace) { 527 /* Duplicate the output iov to append the trailer. 
*/ 528 memcpy(out_iov, outiov, iovcnt * sizeof(*out_iov)); 529 out_iov[iovcnt] = iov[iovcnt]; 530 531 out_uio.uio_iov = out_iov; 532 out_uio.uio_iovcnt = iovcnt + 1; 533 out_uio.uio_offset = 0; 534 out_uio.uio_resid = crp.crp_payload_length + 535 AES_GMAC_HASH_LEN; 536 out_uio.uio_segflg = UIO_SYSSPACE; 537 out_uio.uio_td = curthread; 538 crypto_use_output_uio(&crp, &out_uio); 539 } 540 541 crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST; 542 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE; 543 544 memcpy(crp.crp_iv, nonce, sizeof(nonce)); 545 546 counter_u64_add(ocf_tls13_gcm_crypts, 1); 547 if (inplace) 548 counter_u64_add(ocf_inplace, 1); 549 else 550 counter_u64_add(ocf_separate_output, 1); 551 error = ktls_ocf_dispatch(os, &crp); 552 553 crypto_destroyreq(&crp); 554 return (error); 555 } 556 557 static void 558 ktls_ocf_free(struct ktls_session *tls) 559 { 560 struct ocf_session *os; 561 562 os = tls->cipher; 563 crypto_freesession(os->sid); 564 mtx_destroy(&os->lock); 565 zfree(os, M_KTLS_OCF); 566 } 567 568 static int 569 ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction) 570 { 571 struct crypto_session_params csp, mac_csp; 572 struct ocf_session *os; 573 int error, mac_len; 574 575 memset(&csp, 0, sizeof(csp)); 576 memset(&mac_csp, 0, sizeof(mac_csp)); 577 mac_csp.csp_mode = CSP_MODE_NONE; 578 mac_len = 0; 579 580 switch (tls->params.cipher_algorithm) { 581 case CRYPTO_AES_NIST_GCM_16: 582 switch (tls->params.cipher_key_len) { 583 case 128 / 8: 584 case 256 / 8: 585 break; 586 default: 587 return (EINVAL); 588 } 589 590 /* Only TLS 1.2 and 1.3 are supported. */ 591 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE || 592 tls->params.tls_vminor < TLS_MINOR_VER_TWO || 593 tls->params.tls_vminor > TLS_MINOR_VER_THREE) 594 return (EPROTONOSUPPORT); 595 596 /* TLS 1.3 is not yet supported for receive. 
*/ 597 if (direction == KTLS_RX && 598 tls->params.tls_vminor == TLS_MINOR_VER_THREE) 599 return (EPROTONOSUPPORT); 600 601 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD; 602 csp.csp_mode = CSP_MODE_AEAD; 603 csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16; 604 csp.csp_cipher_key = tls->params.cipher_key; 605 csp.csp_cipher_klen = tls->params.cipher_key_len; 606 csp.csp_ivlen = AES_GCM_IV_LEN; 607 break; 608 case CRYPTO_AES_CBC: 609 switch (tls->params.cipher_key_len) { 610 case 128 / 8: 611 case 256 / 8: 612 break; 613 default: 614 return (EINVAL); 615 } 616 617 switch (tls->params.auth_algorithm) { 618 case CRYPTO_SHA1_HMAC: 619 mac_len = SHA1_HASH_LEN; 620 break; 621 case CRYPTO_SHA2_256_HMAC: 622 mac_len = SHA2_256_HASH_LEN; 623 break; 624 case CRYPTO_SHA2_384_HMAC: 625 mac_len = SHA2_384_HASH_LEN; 626 break; 627 default: 628 return (EINVAL); 629 } 630 631 /* Only TLS 1.0-1.2 are supported. */ 632 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE || 633 tls->params.tls_vminor < TLS_MINOR_VER_ZERO || 634 tls->params.tls_vminor > TLS_MINOR_VER_TWO) 635 return (EPROTONOSUPPORT); 636 637 /* AES-CBC is not supported for receive. 
*/ 638 if (direction == KTLS_RX) 639 return (EPROTONOSUPPORT); 640 641 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT; 642 csp.csp_mode = CSP_MODE_CIPHER; 643 csp.csp_cipher_alg = CRYPTO_AES_CBC; 644 csp.csp_cipher_key = tls->params.cipher_key; 645 csp.csp_cipher_klen = tls->params.cipher_key_len; 646 csp.csp_ivlen = AES_BLOCK_LEN; 647 648 mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT; 649 mac_csp.csp_mode = CSP_MODE_DIGEST; 650 mac_csp.csp_auth_alg = tls->params.auth_algorithm; 651 mac_csp.csp_auth_key = tls->params.auth_key; 652 mac_csp.csp_auth_klen = tls->params.auth_key_len; 653 break; 654 default: 655 return (EPROTONOSUPPORT); 656 } 657 658 os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO); 659 if (os == NULL) 660 return (ENOMEM); 661 662 error = crypto_newsession(&os->sid, &csp, 663 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE); 664 if (error) { 665 free(os, M_KTLS_OCF); 666 return (error); 667 } 668 669 if (mac_csp.csp_mode != CSP_MODE_NONE) { 670 error = crypto_newsession(&os->mac_sid, &mac_csp, 671 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE); 672 if (error) { 673 crypto_freesession(os->sid); 674 free(os, M_KTLS_OCF); 675 return (error); 676 } 677 os->mac_len = mac_len; 678 } 679 680 mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF); 681 tls->cipher = os; 682 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) { 683 if (direction == KTLS_TX) { 684 if (tls->params.tls_vminor == TLS_MINOR_VER_THREE) 685 tls->sw_encrypt = ktls_ocf_tls13_gcm_encrypt; 686 else 687 tls->sw_encrypt = ktls_ocf_tls12_gcm_encrypt; 688 } else { 689 tls->sw_decrypt = ktls_ocf_tls12_gcm_decrypt; 690 } 691 } else { 692 tls->sw_encrypt = ktls_ocf_tls_cbc_encrypt; 693 if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) { 694 os->implicit_iv = true; 695 memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN); 696 } 697 } 698 tls->free = ktls_ocf_free; 699 return (0); 700 } 701 702 struct ktls_crypto_backend ocf_backend = { 703 .name = "OCF", 704 .prio = 5, 705 .api_version = KTLS_API_VERSION, 706 .try 
= ktls_ocf_try, 707 }; 708 709 static int 710 ktls_ocf_modevent(module_t mod, int what, void *arg) 711 { 712 switch (what) { 713 case MOD_LOAD: 714 return (ktls_crypto_backend_register(&ocf_backend)); 715 case MOD_UNLOAD: 716 return (ktls_crypto_backend_deregister(&ocf_backend)); 717 default: 718 return (EOPNOTSUPP); 719 } 720 } 721 722 static moduledata_t ktls_ocf_moduledata = { 723 "ktls_ocf", 724 ktls_ocf_modevent, 725 NULL 726 }; 727 728 DECLARE_MODULE(ktls_ocf, ktls_ocf_moduledata, SI_SUB_PROTO_END, SI_ORDER_ANY); 729