/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <netinet/in.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/ktls.h>
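
/*
 * This file implements the software KTLS backend on top of the
 * OpenCrypto Framework (OCF).  Each KTLS session is bound to one of
 * the ktls_ocf_sw method tables below according to its TLS version
 * and cipher suite, and the per-record encrypt/decrypt/recrypt hooks
 * build OCF requests against the sessions created in ktls_ocf_try().
 */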

struct ktls_ocf_sw {
	/* Encrypt a single outbound TLS record. */
	int	(*encrypt)(struct ktls_ocf_encrypt_state *state,
	    struct ktls_session *tls, struct mbuf *m,
	    struct iovec *outiov, int outiovcnt);

	/* Re-encrypt a received TLS record that is partially decrypted. */
	int	(*recrypt)(struct ktls_session *tls,
	    const struct tls_record_layer *hdr, struct mbuf *m,
	    uint64_t seqno);

	/* Decrypt a received TLS record. */
	int	(*decrypt)(struct ktls_session *tls,
	    const struct tls_record_layer *hdr, struct mbuf *m,
	    uint64_t seqno, int *trailer_len);
};

struct ktls_ocf_session {
	const struct ktls_ocf_sw *sw;
	crypto_session_t sid;
	crypto_session_t mac_sid;
	crypto_session_t recrypt_sid;
	struct mtx lock;
	int mac_len;
	bool implicit_iv;

	/* Only used for TLS 1.0 with the implicit IV. */
#ifdef INVARIANTS
	bool in_progress;
	uint64_t next_seqno;
#endif
	char iv[AES_BLOCK_LEN];
};

struct ocf_operation {
	struct ktls_ocf_session *os;
	bool done;
};

static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");

SYSCTL_DECL(_kern_ipc_tls);
SYSCTL_DECL(_kern_ipc_tls_stats);

static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload via OCF stats");
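
/*
 * The counters below are exported read-only under the
 * kern.ipc.tls.stats.ocf sysctl node declared above, so each one can
 * be inspected from userland with, e.g.:
 *
 *   $ sysctl kern.ipc.tls.stats.ocf.tls12_gcm_encrypts
 */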

static COUNTER_U64_DEFINE_EARLY(ocf_tls10_cbc_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_encrypts,
    CTLFLAG_RD, &ocf_tls10_cbc_encrypts,
    "Total number of OCF TLS 1.0 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_decrypts,
    CTLFLAG_RD, &ocf_tls11_cbc_decrypts,
    "Total number of OCF TLS 1.1/1.2 CBC decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_encrypts,
    CTLFLAG_RD, &ocf_tls11_cbc_encrypts,
    "Total number of OCF TLS 1.1/1.2 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_decrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_decrypts,
    "Total number of OCF TLS 1.2 GCM decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_encrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_encrypts,
    "Total number of OCF TLS 1.2 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_recrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_recrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_recrypts,
    "Total number of OCF TLS 1.2 GCM re-encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_decrypts,
    CTLFLAG_RD, &ocf_tls12_chacha20_decrypts,
    "Total number of OCF TLS 1.2 Chacha20-Poly1305 decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_encrypts,
    CTLFLAG_RD, &ocf_tls12_chacha20_encrypts,
    "Total number of OCF TLS 1.2 Chacha20-Poly1305 encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_decrypts,
    CTLFLAG_RD, &ocf_tls13_gcm_decrypts,
    "Total number of OCF TLS 1.3 GCM decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_encrypts,
    CTLFLAG_RD, &ocf_tls13_gcm_encrypts,
    "Total number of OCF TLS 1.3 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_recrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_recrypts,
    CTLFLAG_RD, &ocf_tls13_gcm_recrypts,
    "Total number of OCF TLS 1.3 GCM re-encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_decrypts,
    CTLFLAG_RD, &ocf_tls13_chacha20_decrypts,
    "Total number of OCF TLS 1.3 Chacha20-Poly1305 decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_encrypts,
    CTLFLAG_RD, &ocf_tls13_chacha20_encrypts,
    "Total number of OCF TLS 1.3 Chacha20-Poly1305 encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_inplace);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
    CTLFLAG_RD, &ocf_inplace,
    "Total number of OCF in-place operations");

static COUNTER_U64_DEFINE_EARLY(ocf_separate_output);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
    CTLFLAG_RD, &ocf_separate_output,
    "Total number of OCF operations with a separate output buffer");

static COUNTER_U64_DEFINE_EARLY(ocf_retries);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
    &ocf_retries,
    "Number of OCF encryption operation retries");

static int
ktls_ocf_callback_sync(struct cryptop *crp __unused)
{
	return (0);
}

static int
ktls_ocf_callback_async(struct cryptop *crp)
{
	struct ocf_operation *oo;

	oo = crp->crp_opaque;
	mtx_lock(&oo->os->lock);
	oo->done = true;
	mtx_unlock(&oo->os->lock);
	wakeup(oo);
	return (0);
}

static int
ktls_ocf_dispatch(struct ktls_ocf_session *os, struct cryptop *crp)
{
	struct ocf_operation oo;
	int error;
	bool async;

	oo.os = os;
	oo.done = false;

	crp->crp_opaque = &oo;
	for (;;) {
		async = !CRYPTO_SESS_SYNC(crp->crp_session);
		crp->crp_callback = async ? ktls_ocf_callback_async :
		    ktls_ocf_callback_sync;

		error = crypto_dispatch(crp);
		if (error)
			break;
		if (async) {
			mtx_lock(&os->lock);
			while (!oo.done)
				mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
			mtx_unlock(&os->lock);
		}

		if (crp->crp_etype != EAGAIN) {
			error = crp->crp_etype;
			break;
		}

		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		oo.done = false;
		counter_u64_add(ocf_retries, 1);
	}
	return (error);
}
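
/*
 * ktls_ocf_dispatch() above blocks until the request completes: for
 * synchronous sessions crypto_dispatch() finishes the request before
 * returning, while for asynchronous sessions the caller sleeps until
 * ktls_ocf_callback_async() fires.  A driver may fail a request with
 * EAGAIN (e.g. if the session has to migrate to another backend); such
 * requests are reset and redispatched, counted in ocf_retries.
 *
 * The variant below is used for asynchronous transmit (when
 * tls->sync_dispatch is false).  Instead of sleeping, its callback
 * hands the completed request back to the KTLS layer via
 * ktls_encrypt_cb().
 */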

static int
ktls_ocf_dispatch_async_cb(struct cryptop *crp)
{
	struct ktls_ocf_encrypt_state *state;
	int error;

	state = crp->crp_opaque;
	if (crp->crp_etype == EAGAIN) {
		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		counter_u64_add(ocf_retries, 1);
		error = crypto_dispatch(crp);
		if (error != 0) {
			crypto_destroyreq(crp);
			ktls_encrypt_cb(state, error);
		}
		return (0);
	}

	error = crp->crp_etype;
	crypto_destroyreq(crp);
	ktls_encrypt_cb(state, error);
	return (0);
}

static int
ktls_ocf_dispatch_async(struct ktls_ocf_encrypt_state *state,
    struct cryptop *crp)
{
	int error;

	crp->crp_opaque = state;
	crp->crp_callback = ktls_ocf_dispatch_async_cb;
	error = crypto_dispatch(crp);
	if (error != 0)
		crypto_destroyreq(crp);
	return (error);
}

static int
ktls_ocf_tls_cbc_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	const struct tls_record_layer *hdr;
	struct uio *uio;
	struct tls_mac_data *ad;
	struct cryptop *crp;
	struct ktls_ocf_session *os;
	struct iovec iov[m->m_epg_npgs + 2];
	u_int pgoff;
	int i, error;
	uint16_t tls_comp_len;
	uint8_t pad;

	MPASS(outiovcnt + 1 <= nitems(iov));

	os = tls->ocf_session;
	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
	crp = &state->crp;
	uio = &state->uio;
	MPASS(tls->sync_dispatch);

#ifdef INVARIANTS
	if (os->implicit_iv) {
		mtx_lock(&os->lock);
		KASSERT(!os->in_progress,
		    ("concurrent implicit IV encryptions"));
		if (os->next_seqno != m->m_epg_seqno) {
			printf("KTLS CBC: TLS records out of order. "
			    "Expected %ju, got %ju\n",
			    (uintmax_t)os->next_seqno,
			    (uintmax_t)m->m_epg_seqno);
			mtx_unlock(&os->lock);
			return (EINVAL);
		}
		os->in_progress = true;
		mtx_unlock(&os->lock);
	}
#endif

	/* Payload length. */
	tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);

	/* Initialize the AAD. */
	ad = &state->mac;
	ad->seq = htobe64(m->m_epg_seqno);
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = htons(tls_comp_len);

	/* First, compute the MAC. */
	iov[0].iov_base = ad;
	iov[0].iov_len = sizeof(*ad);
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs; i++, pgoff = 0) {
		iov[i + 1].iov_base = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] +
		    pgoff);
		iov[i + 1].iov_len = m_epg_pagelen(m, i, pgoff);
	}
	iov[m->m_epg_npgs + 1].iov_base = m->m_epg_trail;
	iov[m->m_epg_npgs + 1].iov_len = os->mac_len;
	uio->uio_iov = iov;
	uio->uio_iovcnt = m->m_epg_npgs + 2;
	uio->uio_offset = 0;
	uio->uio_segflg = UIO_SYSSPACE;
	uio->uio_td = curthread;
	uio->uio_resid = sizeof(*ad) + tls_comp_len + os->mac_len;

	crypto_initreq(crp, os->mac_sid);
	crp->crp_payload_start = 0;
	crp->crp_payload_length = sizeof(*ad) + tls_comp_len;
	crp->crp_digest_start = crp->crp_payload_length;
	crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM;
	crypto_use_uio(crp, uio);
	error = ktls_ocf_dispatch(os, crp);

	crypto_destroyreq(crp);
	if (error) {
#ifdef INVARIANTS
		if (os->implicit_iv) {
			mtx_lock(&os->lock);
			os->in_progress = false;
			mtx_unlock(&os->lock);
		}
#endif
		return (error);
	}
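
	/*
	 * TLS CBC padding consists of pad + 1 copies of the pad byte,
	 * where pad is chosen so that the payload, MAC, and padding
	 * fill a whole number of cipher blocks (RFC 5246, 6.2.3.2);
	 * e.g. three bytes of padding are encoded as 02 02 02.
	 */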

	/* Second, add the padding. */
	pad = m->m_epg_trllen - os->mac_len - 1;
	for (i = 0; i < pad + 1; i++)
		m->m_epg_trail[os->mac_len + i] = pad;

	/* Finally, encrypt the record. */
	crypto_initreq(crp, os->sid);
	crp->crp_payload_start = m->m_epg_hdrlen;
	crp->crp_payload_length = tls_comp_len + m->m_epg_trllen;
	KASSERT(crp->crp_payload_length % AES_BLOCK_LEN == 0,
	    ("invalid encryption size"));
	crypto_use_single_mbuf(crp, m);
	crp->crp_op = CRYPTO_OP_ENCRYPT;
	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	if (os->implicit_iv)
		memcpy(crp->crp_iv, os->iv, AES_BLOCK_LEN);
	else
		memcpy(crp->crp_iv, hdr + 1, AES_BLOCK_LEN);

	if (outiov != NULL) {
		uio->uio_iov = outiov;
		uio->uio_iovcnt = outiovcnt;
		uio->uio_offset = 0;
		uio->uio_segflg = UIO_SYSSPACE;
		uio->uio_td = curthread;
		uio->uio_resid = crp->crp_payload_length;
		crypto_use_output_uio(crp, uio);
	}

	if (os->implicit_iv)
		counter_u64_add(ocf_tls10_cbc_encrypts, 1);
	else
		counter_u64_add(ocf_tls11_cbc_encrypts, 1);
	if (outiov != NULL)
		counter_u64_add(ocf_separate_output, 1);
	else
		counter_u64_add(ocf_inplace, 1);
	error = ktls_ocf_dispatch(os, crp);

	crypto_destroyreq(crp);

	if (os->implicit_iv) {
		KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
		    ("trailer too short to read IV"));
		memcpy(os->iv, m->m_epg_trail + m->m_epg_trllen - AES_BLOCK_LEN,
		    AES_BLOCK_LEN);
#ifdef INVARIANTS
		mtx_lock(&os->lock);
		os->next_seqno = m->m_epg_seqno + 1;
		os->in_progress = false;
		mtx_unlock(&os->lock);
#endif
	}
	return (error);
}

static int
check_padding(void *arg, void *data, u_int len)
{
	uint8_t pad = *(uint8_t *)arg;
	const char *cp = data;

	while (len > 0) {
		if (*cp != pad)
			return (EBADMSG);
		cp++;
		len--;
	}
	return (0);
}
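
/*
 * A received TLS 1.1/1.2 CBC record has the layout:
 *
 *   header | explicit IV | ENC(payload | MAC | padding)
 *
 * Decryption below runs in two passes: the ciphertext is first
 * decrypted in place, then the padding is checked with check_padding()
 * and the MAC is verified over the reconstructed MAC input (a
 * tls_mac_data pseudo-header followed by the payload).
 */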

static int
ktls_ocf_tls_cbc_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
	struct tls_mac_data ad;
	struct cryptop crp;
	struct uio uio;
	struct ktls_ocf_session *os;
	struct iovec *iov;
	struct mbuf *n;
	u_int iovcnt;
	int i, error, skip;
	uint16_t tls_len, tls_comp_len;
	uint8_t pad;

	os = tls->ocf_session;

	/*
	 * Ensure record is a multiple of the cipher block size and
	 * contains at least an explicit IV, MAC, and at least one
	 * padding byte.
	 */
	tls_len = ntohs(hdr->tls_length);
	if (tls_len % AES_BLOCK_LEN != 0 ||
	    tls_len < AES_BLOCK_LEN + roundup2(os->mac_len + 1, AES_BLOCK_LEN))
		return (EMSGSIZE);

	/* First, decrypt the record. */
	crypto_initreq(&crp, os->sid);
	crp.crp_iv_start = sizeof(*hdr);
	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = tls_len - AES_BLOCK_LEN;
	crypto_use_mbuf(&crp, m);
	crp.crp_op = CRYPTO_OP_DECRYPT;
	crp.crp_flags = CRYPTO_F_CBIMM;

	counter_u64_add(ocf_tls11_cbc_decrypts, 1);

	error = ktls_ocf_dispatch(os, &crp);
	crypto_destroyreq(&crp);
	if (error)
		return (error);

	/* Verify the padding. */
	m_copydata(m, sizeof(*hdr) + tls_len - 1, 1, &pad);
	*trailer_len = os->mac_len + pad + 1;
	if (AES_BLOCK_LEN + *trailer_len > tls_len)
		return (EBADMSG);
	error = m_apply(m, sizeof(*hdr) + tls_len - (pad + 1), pad + 1,
	    check_padding, &pad);
	if (error)
		return (error);

	/* Verify the MAC. */
	tls_comp_len = tls_len - (AES_BLOCK_LEN + *trailer_len);
	memset(&uio, 0, sizeof(uio));

	/*
	 * Allocate and populate the iov.  Have to skip over the TLS
	 * header in 'm' as it is not part of the MAC input.
	 */
	iovcnt = 1;
	for (n = m; n != NULL; n = n->m_next)
		iovcnt++;
	iov = malloc(iovcnt * sizeof(*iov), M_KTLS_OCF, M_WAITOK);
	iov[0].iov_base = &ad;
	iov[0].iov_len = sizeof(ad);
	skip = sizeof(*hdr) + AES_BLOCK_LEN;
	i = 1;
	for (n = m; n != NULL; n = n->m_next) {
		/*
		 * Only advance 'i' when an iov slot is filled; mbufs
		 * fully consumed by 'skip' must not leave an
		 * uninitialized entry behind that would be counted in
		 * uio_iovcnt below.
		 */
		if (n->m_len < skip) {
			skip -= n->m_len;
			continue;
		}
		iov[i].iov_base = mtod(n, char *) + skip;
		iov[i].iov_len = n->m_len - skip;
		skip = 0;
		i++;
	}
	uio.uio_iov = iov;
	uio.uio_iovcnt = i;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_td = curthread;
	uio.uio_resid = sizeof(ad) + tls_len - AES_BLOCK_LEN;

	/* Initialize the AAD. */
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);

	crypto_initreq(&crp, os->mac_sid);
	crp.crp_payload_start = 0;
	crp.crp_payload_length = sizeof(ad) + tls_comp_len;
	crp.crp_digest_start = crp.crp_payload_length;
	crp.crp_op = CRYPTO_OP_VERIFY_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM;
	crypto_use_uio(&crp, &uio);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	free(iov, M_KTLS_OCF);
	return (error);
}

static const struct ktls_ocf_sw ktls_ocf_tls_cbc_sw = {
	.encrypt = ktls_ocf_tls_cbc_encrypt,
	.decrypt = ktls_ocf_tls_cbc_decrypt
};
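
/*
 * TLS 1.2 AEAD nonces are 12 bytes but are constructed differently for
 * the two suites:
 *
 *   AES-GCM:            4-byte salt from the key block, followed by
 *                       the 8-byte explicit IV carried in each record.
 *   Chacha20-Poly1305:  12-byte static IV XORed with the 64-bit record
 *                       sequence number (same construction as TLS 1.3).
 */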

static int
ktls_ocf_tls12_aead_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	const struct tls_record_layer *hdr;
	struct uio *uio;
	struct tls_aead_data *ad;
	struct cryptop *crp;
	struct ktls_ocf_session *os;
	int error;
	uint16_t tls_comp_len;

	os = tls->ocf_session;
	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
	crp = &state->crp;
	uio = &state->uio;

	crypto_initreq(crp, os->sid);

	/* Setup the IV. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
		memcpy(crp->crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
		memcpy(crp->crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
		    sizeof(uint64_t));
	} else {
		/*
		 * Chacha20-Poly1305 constructs the IV for TLS 1.2
		 * identically to constructing the IV for AEAD in TLS
		 * 1.3.
		 */
		memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
		*(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);
	}

	/* Setup the AAD. */
	ad = &state->aead;
	tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
	ad->seq = htobe64(m->m_epg_seqno);
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = htons(tls_comp_len);
	crp->crp_aad = ad;
	crp->crp_aad_length = sizeof(*ad);

	/* Set fields for input payload. */
	crypto_use_single_mbuf(crp, m);
	crp->crp_payload_start = m->m_epg_hdrlen;
	crp->crp_payload_length = tls_comp_len;

	if (outiov != NULL) {
		crp->crp_digest_start = crp->crp_payload_length;

		uio->uio_iov = outiov;
		uio->uio_iovcnt = outiovcnt;
		uio->uio_offset = 0;
		uio->uio_segflg = UIO_SYSSPACE;
		uio->uio_td = curthread;
		uio->uio_resid = crp->crp_payload_length + tls->params.tls_tlen;
		crypto_use_output_uio(crp, uio);
	} else
		crp->crp_digest_start = crp->crp_payload_start +
		    crp->crp_payload_length;

	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls12_gcm_encrypts, 1);
	else
		counter_u64_add(ocf_tls12_chacha20_encrypts, 1);
	if (outiov != NULL)
		counter_u64_add(ocf_separate_output, 1);
	else
		counter_u64_add(ocf_inplace, 1);
	if (tls->sync_dispatch) {
		error = ktls_ocf_dispatch(os, crp);
		crypto_destroyreq(crp);
	} else
		error = ktls_ocf_dispatch_async(state, crp);
	return (error);
}

static int
ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
	struct tls_aead_data ad;
	struct cryptop crp;
	struct ktls_ocf_session *os;
	int error;
	uint16_t tls_comp_len, tls_len;

	os = tls->ocf_session;

	/* Ensure record contains at least an explicit IV and tag. */
	tls_len = ntohs(hdr->tls_length);
	if (tls_len + sizeof(*hdr) < tls->params.tls_hlen +
	    tls->params.tls_tlen)
		return (EMSGSIZE);

	crypto_initreq(&crp, os->sid);

	/* Setup the IV. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
		memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
		memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
		    sizeof(uint64_t));
	} else {
		/*
		 * Chacha20-Poly1305 constructs the IV for TLS 1.2
		 * identically to constructing the IV for AEAD in TLS
		 * 1.3.
		 */
		memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
		*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
	}

	/* Setup the AAD. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		tls_comp_len = tls_len -
		    (AES_GMAC_HASH_LEN + sizeof(uint64_t));
	else
		tls_comp_len = tls_len - POLY1305_HASH_LEN;
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = tls_comp_len;
	crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;

	crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls12_gcm_decrypts, 1);
	else
		counter_u64_add(ocf_tls12_chacha20_decrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	*trailer_len = tls->params.tls_tlen;
	return (error);
}
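
/*
 * "Recrypt" exists to support NIC TLS receive offload when records
 * arrive out of order: the NIC may have decrypted only some segments
 * of a record, so the already-decrypted regions (mbufs marked
 * M_DECRYPTED) are re-encrypted to recover the original ciphertext,
 * after which the whole record can be decrypted and authenticated in
 * software.  This works because AES-GCM encrypts the payload with
 * AES-CTR keystream starting at block counter 2; an AES-ICM session
 * keyed identically and given the same nonce regenerates that
 * keystream exactly.
 */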

/*
 * Reconstruct encrypted mbuf data in input buffer.
 */
static void
ktls_ocf_recrypt_fixup(struct mbuf *m, u_int skip, u_int len, char *buf)
{
	const char *src = buf;
	u_int todo;

	while (skip >= m->m_len) {
		skip -= m->m_len;
		m = m->m_next;
	}

	while (len > 0) {
		todo = m->m_len - skip;
		if (todo > len)
			todo = len;

		if (m->m_flags & M_DECRYPTED)
			memcpy(mtod(m, char *) + skip, src, todo);
		src += todo;
		len -= todo;
		skip = 0;
		m = m->m_next;
	}
}

static int
ktls_ocf_tls12_aead_recrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m,
    uint64_t seqno)
{
	struct cryptop crp;
	struct ktls_ocf_session *os;
	char *buf;
	u_int payload_len;
	int error;
	uint16_t tls_len;

	os = tls->ocf_session;

	/* Ensure record contains at least an explicit IV and tag. */
	tls_len = ntohs(hdr->tls_length);
	if (tls_len < sizeof(uint64_t) + AES_GMAC_HASH_LEN)
		return (EMSGSIZE);

	crypto_initreq(&crp, os->recrypt_sid);

	KASSERT(tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16,
	    ("%s: only AES-GCM is supported", __func__));

	/* Setup the IV. */
	memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
	memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
	be32enc(crp.crp_iv + AES_GCM_IV_LEN, 2);

	payload_len = tls_len - (AES_GMAC_HASH_LEN + sizeof(uint64_t));
	crp.crp_op = CRYPTO_OP_ENCRYPT;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);
	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = payload_len;

	buf = malloc(payload_len, M_KTLS_OCF, M_WAITOK);
	crypto_use_output_buf(&crp, buf, payload_len);

	counter_u64_add(ocf_tls12_gcm_recrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);

	if (error == 0)
		ktls_ocf_recrypt_fixup(m, tls->params.tls_hlen, payload_len,
		    buf);

	free(buf, M_KTLS_OCF);
	return (error);
}

static const struct ktls_ocf_sw ktls_ocf_tls12_aead_sw = {
	.encrypt = ktls_ocf_tls12_aead_encrypt,
	.recrypt = ktls_ocf_tls12_aead_recrypt,
	.decrypt = ktls_ocf_tls12_aead_decrypt,
};
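
/*
 * TLS 1.3 encrypts TLSInnerPlaintext: the application data followed by
 * the real record type, while the outer header always claims
 * application_data and is used verbatim as the AAD.  The nonce is the
 * 12-byte static IV XORed with the 64-bit record sequence number
 * (RFC 8446, 5.2 and 5.3).
 */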

static int
ktls_ocf_tls13_aead_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	const struct tls_record_layer *hdr;
	struct uio *uio;
	struct tls_aead_data_13 *ad;
	struct cryptop *crp;
	struct ktls_ocf_session *os;
	int error;

	os = tls->ocf_session;
	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
	crp = &state->crp;
	uio = &state->uio;

	crypto_initreq(crp, os->sid);

	/* Setup the nonce. */
	memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
	*(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);

	/* Setup the AAD. */
	ad = &state->aead13;
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = hdr->tls_length;
	crp->crp_aad = ad;
	crp->crp_aad_length = sizeof(*ad);

	/* Set fields for input payload. */
	crypto_use_single_mbuf(crp, m);
	crp->crp_payload_start = m->m_epg_hdrlen;
	crp->crp_payload_length = m->m_len -
	    (m->m_epg_hdrlen + m->m_epg_trllen);

	/* Store the record type as the first byte of the trailer. */
	m->m_epg_trail[0] = m->m_epg_record_type;
	crp->crp_payload_length++;

	if (outiov != NULL) {
		crp->crp_digest_start = crp->crp_payload_length;

		uio->uio_iov = outiov;
		uio->uio_iovcnt = outiovcnt;
		uio->uio_offset = 0;
		uio->uio_segflg = UIO_SYSSPACE;
		uio->uio_td = curthread;
		uio->uio_resid = m->m_len - m->m_epg_hdrlen;
		crypto_use_output_uio(crp, uio);
	} else
		crp->crp_digest_start = crp->crp_payload_start +
		    crp->crp_payload_length;

	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls13_gcm_encrypts, 1);
	else
		counter_u64_add(ocf_tls13_chacha20_encrypts, 1);
	if (outiov != NULL)
		counter_u64_add(ocf_separate_output, 1);
	else
		counter_u64_add(ocf_inplace, 1);
	if (tls->sync_dispatch) {
		error = ktls_ocf_dispatch(os, crp);
		crypto_destroyreq(crp);
	} else
		error = ktls_ocf_dispatch_async(state, crp);
	return (error);
}

static int
ktls_ocf_tls13_aead_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
	struct tls_aead_data_13 ad;
	struct cryptop crp;
	struct ktls_ocf_session *os;
	int error;
	u_int tag_len;
	uint16_t tls_len;

	os = tls->ocf_session;

	tag_len = tls->params.tls_tlen - 1;

	/* Payload must contain at least one byte for the record type. */
	tls_len = ntohs(hdr->tls_length);
	if (tls_len < tag_len + 1)
		return (EMSGSIZE);

	crypto_initreq(&crp, os->sid);

	/* Setup the nonce. */
	memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
	*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);

	/* Setup the AAD. */
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = hdr->tls_length;
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = tls_len - tag_len;
	crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;

	crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls13_gcm_decrypts, 1);
	else
		counter_u64_add(ocf_tls13_chacha20_decrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	*trailer_len = tag_len;
	return (error);
}
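
/*
 * TLS 1.3 recrypt mirrors the TLS 1.2 path above; the only difference
 * is the nonce construction (static IV XOR sequence number instead of
 * salt plus explicit IV), since TLS 1.3 records carry no explicit IV.
 */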

static int
ktls_ocf_tls13_aead_recrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m,
    uint64_t seqno)
{
	struct cryptop crp;
	struct ktls_ocf_session *os;
	char *buf;
	u_int payload_len;
	int error;
	uint16_t tls_len;

	os = tls->ocf_session;

	/* Payload must contain at least one byte for the record type. */
	tls_len = ntohs(hdr->tls_length);
	if (tls_len < AES_GMAC_HASH_LEN + 1)
		return (EMSGSIZE);

	crypto_initreq(&crp, os->recrypt_sid);

	KASSERT(tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16,
	    ("%s: only AES-GCM is supported", __func__));

	/* Setup the IV. */
	memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
	*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
	be32enc(crp.crp_iv + 12, 2);

	payload_len = tls_len - AES_GMAC_HASH_LEN;
	crp.crp_op = CRYPTO_OP_ENCRYPT;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);
	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = payload_len;

	buf = malloc(payload_len, M_KTLS_OCF, M_WAITOK);
	crypto_use_output_buf(&crp, buf, payload_len);

	counter_u64_add(ocf_tls13_gcm_recrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);

	if (error == 0)
		ktls_ocf_recrypt_fixup(m, tls->params.tls_hlen, payload_len,
		    buf);

	free(buf, M_KTLS_OCF);
	return (error);
}

static const struct ktls_ocf_sw ktls_ocf_tls13_aead_sw = {
	.encrypt = ktls_ocf_tls13_aead_encrypt,
	.recrypt = ktls_ocf_tls13_aead_recrypt,
	.decrypt = ktls_ocf_tls13_aead_decrypt,
};

void
ktls_ocf_free(struct ktls_session *tls)
{
	struct ktls_ocf_session *os;

	os = tls->ocf_session;
	crypto_freesession(os->sid);
	crypto_freesession(os->mac_sid);
	crypto_freesession(os->recrypt_sid);
	mtx_destroy(&os->lock);
	zfree(os, M_KTLS_OCF);
}
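
/*
 * Probe whether OCF can service a KTLS session.  The supported suites
 * are:
 *
 *   AES-GCM (128/256-bit keys)          TLS 1.2 and 1.3
 *   AES-CBC with SHA1/SHA2-256/384 MAC  TLS 1.0-1.2 (no TLS 1.0 receive)
 *   Chacha20-Poly1305 (256-bit key)     TLS 1.2 and 1.3
 *
 * On success, tls->ocf_session holds the OCF sessions and the method
 * table matching the negotiated version and cipher.
 */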

int
ktls_ocf_try(struct ktls_session *tls, int direction)
{
	struct crypto_session_params csp, mac_csp, recrypt_csp;
	struct ktls_ocf_session *os;
	int error, mac_len;

	memset(&csp, 0, sizeof(csp));
	memset(&mac_csp, 0, sizeof(mac_csp));
	mac_csp.csp_mode = CSP_MODE_NONE;
	mac_len = 0;
	memset(&recrypt_csp, 0, sizeof(recrypt_csp));
	recrypt_csp.csp_mode = CSP_MODE_NONE;

	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.2 and 1.3 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
		csp.csp_mode = CSP_MODE_AEAD;
		csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = AES_GCM_IV_LEN;

		recrypt_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		recrypt_csp.csp_mode = CSP_MODE_CIPHER;
		recrypt_csp.csp_cipher_alg = CRYPTO_AES_ICM;
		recrypt_csp.csp_cipher_key = tls->params.cipher_key;
		recrypt_csp.csp_cipher_klen = tls->params.cipher_key_len;
		recrypt_csp.csp_ivlen = AES_BLOCK_LEN;
		break;
	case CRYPTO_AES_CBC:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		switch (tls->params.auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			mac_len = SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256_HMAC:
			mac_len = SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384_HMAC:
			mac_len = SHA2_384_HASH_LEN;
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.0-1.2 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_ZERO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_TWO)
			return (EPROTONOSUPPORT);

		/* AES-CBC is not supported for receive for TLS 1.0. */
		if (direction == KTLS_RX &&
		    tls->params.tls_vminor == TLS_MINOR_VER_ZERO)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		csp.csp_mode = CSP_MODE_CIPHER;
		csp.csp_cipher_alg = CRYPTO_AES_CBC;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = AES_BLOCK_LEN;

		mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		mac_csp.csp_mode = CSP_MODE_DIGEST;
		mac_csp.csp_auth_alg = tls->params.auth_algorithm;
		mac_csp.csp_auth_key = tls->params.auth_key;
		mac_csp.csp_auth_klen = tls->params.auth_key_len;
		break;
	case CRYPTO_CHACHA20_POLY1305:
		switch (tls->params.cipher_key_len) {
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.2 and 1.3 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
		csp.csp_mode = CSP_MODE_AEAD;
		csp.csp_cipher_alg = CRYPTO_CHACHA20_POLY1305;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = CHACHA20_POLY1305_IV_LEN;
		break;
	default:
		return (EPROTONOSUPPORT);
	}

	os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO);
	if (os == NULL)
		return (ENOMEM);

	error = crypto_newsession(&os->sid, &csp,
	    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
	if (error) {
		free(os, M_KTLS_OCF);
		return (error);
	}

	if (mac_csp.csp_mode != CSP_MODE_NONE) {
		error = crypto_newsession(&os->mac_sid, &mac_csp,
		    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
		if (error) {
			crypto_freesession(os->sid);
			free(os, M_KTLS_OCF);
			return (error);
		}
		os->mac_len = mac_len;
	}

	if (recrypt_csp.csp_mode != CSP_MODE_NONE) {
		error = crypto_newsession(&os->recrypt_sid, &recrypt_csp,
		    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
		if (error) {
			crypto_freesession(os->sid);
			free(os, M_KTLS_OCF);
			return (error);
		}
	}

	mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
	tls->ocf_session = os;
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 ||
	    tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) {
		if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
			os->sw = &ktls_ocf_tls13_aead_sw;
		else
			os->sw = &ktls_ocf_tls12_aead_sw;
	} else {
		os->sw = &ktls_ocf_tls_cbc_sw;
		if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) {
			os->implicit_iv = true;
			memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
#ifdef INVARIANTS
			os->next_seqno = tls->next_seqno;
#endif
		}
	}

	/*
	 * AES-CBC is always synchronous currently.  Asynchronous
	 * operation would require multiple callbacks and an additional
	 * iovec array in ktls_ocf_encrypt_state.
	 */
	tls->sync_dispatch = CRYPTO_SESS_SYNC(os->sid) ||
	    tls->params.cipher_algorithm == CRYPTO_AES_CBC;
	return (0);
}

int
ktls_ocf_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	return (tls->ocf_session->sw->encrypt(state, tls, m, outiov,
	    outiovcnt));
}

int
ktls_ocf_decrypt(struct ktls_session *tls, const struct tls_record_layer *hdr,
    struct mbuf *m, uint64_t seqno, int *trailer_len)
{
	return (tls->ocf_session->sw->decrypt(tls, hdr, m, seqno, trailer_len));
}

int
ktls_ocf_recrypt(struct ktls_session *tls, const struct tls_record_layer *hdr,
    struct mbuf *m, uint64_t seqno)
{
	return (tls->ocf_session->sw->recrypt(tls, hdr, m, seqno));
}

bool
ktls_ocf_recrypt_supported(struct ktls_session *tls)
{
	return (tls->ocf_session->sw->recrypt != NULL &&
	    tls->ocf_session->recrypt_sid != NULL);
}