/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <netinet/in.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/ktls.h>

struct ktls_ocf_sw {
	/* Encrypt a single outbound TLS record. */
	int	(*encrypt)(struct ktls_ocf_encrypt_state *state,
	    struct ktls_session *tls, struct mbuf *m,
	    struct iovec *outiov, int outiovcnt);

	/* Re-encrypt a received TLS record that is partially decrypted. */
	int	(*recrypt)(struct ktls_session *tls,
	    const struct tls_record_layer *hdr, struct mbuf *m,
	    uint64_t seqno);

	/* Decrypt a received TLS record. */
	int	(*decrypt)(struct ktls_session *tls,
	    const struct tls_record_layer *hdr, struct mbuf *m,
	    uint64_t seqno, int *trailer_len);
};

struct ktls_ocf_session {
	const struct ktls_ocf_sw *sw;
	crypto_session_t sid;
	crypto_session_t mac_sid;
	crypto_session_t recrypt_sid;
	struct mtx lock;
	int mac_len;
	bool implicit_iv;

	/* Only used for TLS 1.0 with the implicit IV. */
#ifdef INVARIANTS
	bool in_progress;
	uint64_t next_seqno;
#endif
	char iv[AES_BLOCK_LEN];
};

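/*
 * Completion state for a synchronously dispatched request.  The
 * dispatching thread sleeps on this until the crypto driver's
 * completion callback sets 'done' and wakes it up.
 */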
struct ocf_operation {
	struct ktls_ocf_session *os;
	bool done;
};

static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");

SYSCTL_DECL(_kern_ipc_tls);
SYSCTL_DECL(_kern_ipc_tls_stats);

static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload via OCF stats");

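/*
 * Per-operation statistics exported under kern.ipc.tls.stats.ocf: one
 * counter for each TLS version/cipher/direction combination, plus
 * counters for in-place vs. separate-output requests and for retried
 * dispatches.
 */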
static COUNTER_U64_DEFINE_EARLY(ocf_tls10_cbc_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_encrypts,
    CTLFLAG_RD, &ocf_tls10_cbc_encrypts,
    "Total number of OCF TLS 1.0 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_decrypts,
    CTLFLAG_RD, &ocf_tls11_cbc_decrypts,
    "Total number of OCF TLS 1.1/1.2 CBC decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_encrypts,
    CTLFLAG_RD, &ocf_tls11_cbc_encrypts,
    "Total number of OCF TLS 1.1/1.2 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_decrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_decrypts,
    "Total number of OCF TLS 1.2 GCM decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_encrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_encrypts,
    "Total number of OCF TLS 1.2 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_recrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_recrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_recrypts,
    "Total number of OCF TLS 1.2 GCM re-encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_decrypts,
    CTLFLAG_RD, &ocf_tls12_chacha20_decrypts,
    "Total number of OCF TLS 1.2 Chacha20-Poly1305 decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_encrypts,
    CTLFLAG_RD, &ocf_tls12_chacha20_encrypts,
    "Total number of OCF TLS 1.2 Chacha20-Poly1305 encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_decrypts,
    CTLFLAG_RD, &ocf_tls13_gcm_decrypts,
    "Total number of OCF TLS 1.3 GCM decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_encrypts,
    CTLFLAG_RD, &ocf_tls13_gcm_encrypts,
    "Total number of OCF TLS 1.3 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_recrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_recrypts,
    CTLFLAG_RD, &ocf_tls13_gcm_recrypts,
    "Total number of OCF TLS 1.3 GCM re-encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_decrypts,
    CTLFLAG_RD, &ocf_tls13_chacha20_decrypts,
    "Total number of OCF TLS 1.3 Chacha20-Poly1305 decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_encrypts,
    CTLFLAG_RD, &ocf_tls13_chacha20_encrypts,
    "Total number of OCF TLS 1.3 Chacha20-Poly1305 encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_inplace);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
    CTLFLAG_RD, &ocf_inplace,
    "Total number of OCF in-place operations");

static COUNTER_U64_DEFINE_EARLY(ocf_separate_output);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
    CTLFLAG_RD, &ocf_separate_output,
    "Total number of OCF operations with a separate output buffer");

static COUNTER_U64_DEFINE_EARLY(ocf_retries);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
    &ocf_retries,
    "Number of OCF encryption operation retries");

static int
ktls_ocf_callback_sync(struct cryptop *crp __unused)
{
	return (0);
}

static int
ktls_ocf_callback_async(struct cryptop *crp)
{
	struct ocf_operation *oo;

	oo = crp->crp_opaque;
	mtx_lock(&oo->os->lock);
	oo->done = true;
	mtx_unlock(&oo->os->lock);
	wakeup(oo);
	return (0);
}

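/*
 * Dispatch a request and wait for it to complete.  For asynchronous
 * drivers the calling thread sleeps until ktls_ocf_callback_async()
 * marks the operation done.  A completion status of EAGAIN asks us to
 * resubmit the request (e.g., because the session has migrated to a
 * different driver), so clear the error and done state and loop.
 */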
static int
ktls_ocf_dispatch(struct ktls_ocf_session *os, struct cryptop *crp)
{
	struct ocf_operation oo;
	int error;
	bool async;

	oo.os = os;
	oo.done = false;

	crp->crp_opaque = &oo;
	for (;;) {
		async = !CRYPTO_SESS_SYNC(crp->crp_session);
		crp->crp_callback = async ? ktls_ocf_callback_async :
		    ktls_ocf_callback_sync;

		error = crypto_dispatch(crp);
		if (error)
			break;
		if (async) {
			mtx_lock(&os->lock);
			while (!oo.done)
				mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
			mtx_unlock(&os->lock);
		}

		if (crp->crp_etype != EAGAIN) {
			error = crp->crp_etype;
			break;
		}

		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		oo.done = false;
		counter_u64_add(ocf_retries, 1);
	}
	return (error);
}

static int
ktls_ocf_dispatch_async_cb(struct cryptop *crp)
{
	struct ktls_ocf_encrypt_state *state;
	int error;

	state = crp->crp_opaque;
	if (crp->crp_etype == EAGAIN) {
		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		counter_u64_add(ocf_retries, 1);
		error = crypto_dispatch(crp);
		if (error != 0) {
			crypto_destroyreq(crp);
			ktls_encrypt_cb(state, error);
		}
		return (0);
	}

	error = crp->crp_etype;
	crypto_destroyreq(crp);
	ktls_encrypt_cb(state, error);
	return (0);
}

static int
ktls_ocf_dispatch_async(struct ktls_ocf_encrypt_state *state,
    struct cryptop *crp)
{
	int error;

	crp->crp_opaque = state;
	crp->crp_callback = ktls_ocf_dispatch_async_cb;
	error = crypto_dispatch(crp);
	if (error != 0)
		crypto_destroyreq(crp);
	return (error);
}

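/*
 * MAC-then-encrypt for TLS 1.0-1.2 AES-CBC records: first compute the
 * HMAC over the MAC pseudo-header and payload using the separate
 * digest session, then fill in the CBC padding bytes, and finally
 * encrypt the payload and trailer.  TLS 1.0 chains the IV from the
 * last block of the previous record, so records must be encrypted
 * synchronously and in sequence order.
 */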
static int
ktls_ocf_tls_cbc_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	const struct tls_record_layer *hdr;
	struct uio *uio;
	struct tls_mac_data *ad;
	struct cryptop *crp;
	struct ktls_ocf_session *os;
	struct iovec iov[m->m_epg_npgs + 2];
	u_int pgoff;
	int i, error;
	uint16_t tls_comp_len;
	uint8_t pad;

	MPASS(outiovcnt + 1 <= nitems(iov));

	os = tls->ocf_session;
	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
	crp = &state->crp;
	uio = &state->uio;
	MPASS(tls->sync_dispatch);

#ifdef INVARIANTS
	if (os->implicit_iv) {
		mtx_lock(&os->lock);
		KASSERT(!os->in_progress,
		    ("concurrent implicit IV encryptions"));
		if (os->next_seqno != m->m_epg_seqno) {
			printf("KTLS CBC: TLS records out of order.  "
			    "Expected %ju, got %ju\n",
			    (uintmax_t)os->next_seqno,
			    (uintmax_t)m->m_epg_seqno);
			mtx_unlock(&os->lock);
			return (EINVAL);
		}
		os->in_progress = true;
		mtx_unlock(&os->lock);
	}
#endif

	/* Payload length. */
	tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);

	/* Initialize the AAD. */
	ad = &state->mac;
	ad->seq = htobe64(m->m_epg_seqno);
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = htons(tls_comp_len);

	/* First, compute the MAC. */
	iov[0].iov_base = ad;
	iov[0].iov_len = sizeof(*ad);
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs; i++, pgoff = 0) {
		iov[i + 1].iov_base = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] +
		    pgoff);
		iov[i + 1].iov_len = m_epg_pagelen(m, i, pgoff);
	}
	iov[m->m_epg_npgs + 1].iov_base = m->m_epg_trail;
	iov[m->m_epg_npgs + 1].iov_len = os->mac_len;
	uio->uio_iov = iov;
	uio->uio_iovcnt = m->m_epg_npgs + 2;
	uio->uio_offset = 0;
	uio->uio_segflg = UIO_SYSSPACE;
	uio->uio_td = curthread;
	uio->uio_resid = sizeof(*ad) + tls_comp_len + os->mac_len;

	crypto_initreq(crp, os->mac_sid);
	crp->crp_payload_start = 0;
	crp->crp_payload_length = sizeof(*ad) + tls_comp_len;
	crp->crp_digest_start = crp->crp_payload_length;
	crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM;
	crypto_use_uio(crp, uio);
	error = ktls_ocf_dispatch(os, crp);

	crypto_destroyreq(crp);
	if (error) {
#ifdef INVARIANTS
		if (os->implicit_iv) {
			mtx_lock(&os->lock);
			os->in_progress = false;
			mtx_unlock(&os->lock);
		}
#endif
		return (error);
	}

	/* Second, add the padding. */
	pad = m->m_epg_trllen - os->mac_len - 1;
	for (i = 0; i < pad + 1; i++)
		m->m_epg_trail[os->mac_len + i] = pad;

	/* Finally, encrypt the record. */
	crypto_initreq(crp, os->sid);
	crp->crp_payload_start = m->m_epg_hdrlen;
	crp->crp_payload_length = tls_comp_len + m->m_epg_trllen;
	KASSERT(crp->crp_payload_length % AES_BLOCK_LEN == 0,
	    ("invalid encryption size"));
	crypto_use_single_mbuf(crp, m);
	crp->crp_op = CRYPTO_OP_ENCRYPT;
	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	if (os->implicit_iv)
		memcpy(crp->crp_iv, os->iv, AES_BLOCK_LEN);
	else
		memcpy(crp->crp_iv, hdr + 1, AES_BLOCK_LEN);

	if (outiov != NULL) {
		uio->uio_iov = outiov;
		uio->uio_iovcnt = outiovcnt;
		uio->uio_offset = 0;
		uio->uio_segflg = UIO_SYSSPACE;
		uio->uio_td = curthread;
		uio->uio_resid = crp->crp_payload_length;
		crypto_use_output_uio(crp, uio);
	}

	if (os->implicit_iv)
		counter_u64_add(ocf_tls10_cbc_encrypts, 1);
	else
		counter_u64_add(ocf_tls11_cbc_encrypts, 1);
	if (outiov != NULL)
		counter_u64_add(ocf_separate_output, 1);
	else
		counter_u64_add(ocf_inplace, 1);
	error = ktls_ocf_dispatch(os, crp);

	crypto_destroyreq(crp);

	if (os->implicit_iv) {
		KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
		    ("trailer too short to read IV"));
		memcpy(os->iv, m->m_epg_trail + m->m_epg_trllen - AES_BLOCK_LEN,
		    AES_BLOCK_LEN);
#ifdef INVARIANTS
		mtx_lock(&os->lock);
		os->next_seqno = m->m_epg_seqno + 1;
		os->in_progress = false;
		mtx_unlock(&os->lock);
#endif
	}
	return (error);
}

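/*
 * m_apply() callback used on receive to verify that every CBC padding
 * byte has the expected value (the pad length itself).
 */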
static int
check_padding(void *arg, void *data, u_int len)
{
	uint8_t pad = *(uint8_t *)arg;
	const char *cp = data;

	while (len > 0) {
		if (*cp != pad)
			return (EBADMSG);
		cp++;
		len--;
	}
	return (0);
}

static int
ktls_ocf_tls_cbc_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
	struct tls_mac_data ad;
	struct cryptop crp;
	struct uio uio;
	struct ktls_ocf_session *os;
	struct iovec *iov;
	struct mbuf *n;
	u_int iovcnt;
	int i, error, skip;
	uint16_t tls_len, tls_comp_len;
	uint8_t pad;

	os = tls->ocf_session;

	/*
	 * Ensure record is a multiple of the cipher block size and
	 * contains at least an explicit IV, MAC, and at least one
	 * padding byte.
	 */
	tls_len = ntohs(hdr->tls_length);
	if (tls_len % AES_BLOCK_LEN != 0 ||
	    tls_len < AES_BLOCK_LEN + roundup2(os->mac_len + 1, AES_BLOCK_LEN))
		return (EMSGSIZE);

	/* First, decrypt the record. */
	crypto_initreq(&crp, os->sid);
	crp.crp_iv_start = sizeof(*hdr);
	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = tls_len - AES_BLOCK_LEN;
	crypto_use_mbuf(&crp, m);
	crp.crp_op = CRYPTO_OP_DECRYPT;
	crp.crp_flags = CRYPTO_F_CBIMM;

	counter_u64_add(ocf_tls11_cbc_decrypts, 1);

	error = ktls_ocf_dispatch(os, &crp);
	crypto_destroyreq(&crp);
	if (error)
		return (error);

	/* Verify the padding. */
	m_copydata(m, sizeof(*hdr) + tls_len - 1, 1, &pad);
	*trailer_len = os->mac_len + pad + 1;
	if (AES_BLOCK_LEN + *trailer_len > tls_len)
		return (EBADMSG);
	error = m_apply(m, sizeof(*hdr) + tls_len - (pad + 1), pad + 1,
	    check_padding, &pad);
	if (error)
		return (error);

	/* Verify the MAC. */
	tls_comp_len = tls_len - (AES_BLOCK_LEN + *trailer_len);
	memset(&uio, 0, sizeof(uio));

	/*
	 * Allocate and populate the iov.  Have to skip over the TLS
	 * header in 'm' as it is not part of the MAC input.
	 */
	iovcnt = 1;
	for (n = m; n != NULL; n = n->m_next)
		iovcnt++;
	iov = malloc(iovcnt * sizeof(*iov), M_KTLS_OCF, M_WAITOK);
	iov[0].iov_base = &ad;
	iov[0].iov_len = sizeof(ad);
	skip = sizeof(*hdr) + AES_BLOCK_LEN;
	for (i = 1, n = m; n != NULL; i++, n = n->m_next) {
		if (n->m_len < skip) {
			skip -= n->m_len;
			continue;
		}
		iov[i].iov_base = mtod(n, char *) + skip;
		iov[i].iov_len = n->m_len - skip;
		skip = 0;
	}
	uio.uio_iov = iov;
	uio.uio_iovcnt = i;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_td = curthread;
	uio.uio_resid = sizeof(ad) + tls_len - AES_BLOCK_LEN;

	/* Initialize the AAD. */
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);

	crypto_initreq(&crp, os->mac_sid);
	crp.crp_payload_start = 0;
	crp.crp_payload_length = sizeof(ad) + tls_comp_len;
	crp.crp_digest_start = crp.crp_payload_length;
	crp.crp_op = CRYPTO_OP_VERIFY_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM;
	crypto_use_uio(&crp, &uio);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	free(iov, M_KTLS_OCF);
	return (error);
}

static const struct ktls_ocf_sw ktls_ocf_tls_cbc_sw = {
	.encrypt = ktls_ocf_tls_cbc_encrypt,
	.decrypt = ktls_ocf_tls_cbc_decrypt
};

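/*
 * AEAD suites for TLS 1.2.  With AES-GCM the nonce is the 4-byte
 * implicit salt from the key block followed by the 8-byte explicit
 * nonce carried after the record header; with Chacha20-Poly1305 the
 * 64-bit sequence number is XORed into the last 8 bytes of the
 * implicit IV, just as in TLS 1.3.
 */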
static int
ktls_ocf_tls12_aead_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	const struct tls_record_layer *hdr;
	struct uio *uio;
	struct tls_aead_data *ad;
	struct cryptop *crp;
	struct ktls_ocf_session *os;
	int error;
	uint16_t tls_comp_len;

	os = tls->ocf_session;
	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
	crp = &state->crp;
	uio = &state->uio;

	crypto_initreq(crp, os->sid);

	/* Setup the IV. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
		memcpy(crp->crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
		memcpy(crp->crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
		    sizeof(uint64_t));
	} else {
		/*
		 * Chacha20-Poly1305 constructs the IV for TLS 1.2
		 * identically to constructing the IV for AEAD in TLS
		 * 1.3.
		 */
		memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
		*(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);
	}

	/* Setup the AAD. */
	ad = &state->aead;
	tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
	ad->seq = htobe64(m->m_epg_seqno);
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = htons(tls_comp_len);
	crp->crp_aad = ad;
	crp->crp_aad_length = sizeof(*ad);

	/* Set fields for input payload. */
	crypto_use_single_mbuf(crp, m);
	crp->crp_payload_start = m->m_epg_hdrlen;
	crp->crp_payload_length = tls_comp_len;

	if (outiov != NULL) {
		crp->crp_digest_start = crp->crp_payload_length;

		uio->uio_iov = outiov;
		uio->uio_iovcnt = outiovcnt;
		uio->uio_offset = 0;
		uio->uio_segflg = UIO_SYSSPACE;
		uio->uio_td = curthread;
		uio->uio_resid = crp->crp_payload_length + tls->params.tls_tlen;
		crypto_use_output_uio(crp, uio);
	} else
		crp->crp_digest_start = crp->crp_payload_start +
		    crp->crp_payload_length;

	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls12_gcm_encrypts, 1);
	else
		counter_u64_add(ocf_tls12_chacha20_encrypts, 1);
	if (outiov != NULL)
		counter_u64_add(ocf_separate_output, 1);
	else
		counter_u64_add(ocf_inplace, 1);
	if (tls->sync_dispatch) {
		error = ktls_ocf_dispatch(os, crp);
		crypto_destroyreq(crp);
	} else
		error = ktls_ocf_dispatch_async(state, crp);
	return (error);
}

static int
ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
	struct tls_aead_data ad;
	struct cryptop crp;
	struct ktls_ocf_session *os;
	int error;
	uint16_t tls_comp_len, tls_len;

	os = tls->ocf_session;

	/* Ensure record contains at least an explicit IV and tag. */
	tls_len = ntohs(hdr->tls_length);
	if (tls_len + sizeof(*hdr) < tls->params.tls_hlen +
	    tls->params.tls_tlen)
		return (EMSGSIZE);

	crypto_initreq(&crp, os->sid);

	/* Setup the IV. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
		memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
		memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
		    sizeof(uint64_t));
	} else {
		/*
		 * Chacha20-Poly1305 constructs the IV for TLS 1.2
		 * identically to constructing the IV for AEAD in TLS
		 * 1.3.
		 */
		memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
		*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
	}

	/* Setup the AAD. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		tls_comp_len = tls_len -
		    (AES_GMAC_HASH_LEN + sizeof(uint64_t));
	else
		tls_comp_len = tls_len - POLY1305_HASH_LEN;
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = tls_comp_len;
	crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;

	crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls12_gcm_decrypts, 1);
	else
		counter_u64_add(ocf_tls12_chacha20_decrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	*trailer_len = tls->params.tls_tlen;
	return (error);
}

/*
 * Reconstruct encrypted mbuf data in input buffer.
 */
static void
ktls_ocf_recrypt_fixup(struct mbuf *m, u_int skip, u_int len, char *buf)
{
	const char *src = buf;
	u_int todo;

	while (skip >= m->m_len) {
		skip -= m->m_len;
		m = m->m_next;
	}

	while (len > 0) {
		todo = m->m_len - skip;
		if (todo > len)
			todo = len;

		if (m->m_flags & M_DECRYPTED)
			memcpy(mtod(m, char *) + skip, src, todo);
		src += todo;
		len -= todo;
		skip = 0;
		m = m->m_next;
	}
}

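/*
 * "Recrypt" restores the original AES-GCM ciphertext of a record that
 * has already been partially decrypted (e.g., by a NIC whose TLS
 * receive offload lost synchronization).  The payload is re-encrypted
 * with a plain AES-CTR session whose counter starts at 2 to match
 * GCM's payload counter, written to a temporary buffer, and
 * ktls_ocf_recrypt_fixup() then copies the regenerated ciphertext
 * back over only those mbufs marked M_DECRYPTED.
 */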
static int
ktls_ocf_tls12_aead_recrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m,
    uint64_t seqno)
{
	struct cryptop crp;
	struct ktls_ocf_session *os;
	char *buf;
	u_int payload_len;
	int error;
	uint16_t tls_len;

	os = tls->ocf_session;

	/* Ensure record contains at least an explicit IV and tag. */
	tls_len = ntohs(hdr->tls_length);
	if (tls_len < sizeof(uint64_t) + AES_GMAC_HASH_LEN)
		return (EMSGSIZE);

	crypto_initreq(&crp, os->recrypt_sid);

	KASSERT(tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16,
	    ("%s: only AES-GCM is supported", __func__));

	/* Setup the IV. */
	memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
	memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
	be32enc(crp.crp_iv + AES_GCM_IV_LEN, 2);

	payload_len = tls_len - (AES_GMAC_HASH_LEN + sizeof(uint64_t));
	crp.crp_op = CRYPTO_OP_ENCRYPT;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);
	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = payload_len;

	buf = malloc(payload_len, M_KTLS_OCF, M_WAITOK);
	crypto_use_output_buf(&crp, buf, payload_len);

	counter_u64_add(ocf_tls12_gcm_recrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);

	if (error == 0)
		ktls_ocf_recrypt_fixup(m, tls->params.tls_hlen, payload_len,
		    buf);

	free(buf, M_KTLS_OCF);
	return (error);
}

static const struct ktls_ocf_sw ktls_ocf_tls12_aead_sw = {
	.encrypt = ktls_ocf_tls12_aead_encrypt,
	.recrypt = ktls_ocf_tls12_aead_recrypt,
	.decrypt = ktls_ocf_tls12_aead_decrypt,
};

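/*
 * AEAD suites for TLS 1.3.  The AAD is the record header exactly as
 * it appears on the wire, the nonce is the implicit IV XORed with the
 * record sequence number, and the real record type is carried as the
 * first trailer byte, encrypted along with the payload.
 */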
static int
ktls_ocf_tls13_aead_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	const struct tls_record_layer *hdr;
	struct uio *uio;
	struct tls_aead_data_13 *ad;
	struct cryptop *crp;
	struct ktls_ocf_session *os;
	int error;

	os = tls->ocf_session;
	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
	crp = &state->crp;
	uio = &state->uio;

	crypto_initreq(crp, os->sid);

	/* Setup the nonce. */
	memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
	*(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);

	/* Setup the AAD. */
	ad = &state->aead13;
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = hdr->tls_length;
	crp->crp_aad = ad;
	crp->crp_aad_length = sizeof(*ad);

	/* Set fields for input payload. */
	crypto_use_single_mbuf(crp, m);
	crp->crp_payload_start = m->m_epg_hdrlen;
	crp->crp_payload_length = m->m_len -
	    (m->m_epg_hdrlen + m->m_epg_trllen);

	/* Store the record type as the first byte of the trailer. */
	m->m_epg_trail[0] = m->m_epg_record_type;
	crp->crp_payload_length++;

	if (outiov != NULL) {
		crp->crp_digest_start = crp->crp_payload_length;

		uio->uio_iov = outiov;
		uio->uio_iovcnt = outiovcnt;
		uio->uio_offset = 0;
		uio->uio_segflg = UIO_SYSSPACE;
		uio->uio_td = curthread;
		uio->uio_resid = m->m_len - m->m_epg_hdrlen;
		crypto_use_output_uio(crp, uio);
	} else
		crp->crp_digest_start = crp->crp_payload_start +
		    crp->crp_payload_length;

	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls13_gcm_encrypts, 1);
	else
		counter_u64_add(ocf_tls13_chacha20_encrypts, 1);
	if (outiov != NULL)
		counter_u64_add(ocf_separate_output, 1);
	else
		counter_u64_add(ocf_inplace, 1);
	if (tls->sync_dispatch) {
		error = ktls_ocf_dispatch(os, crp);
		crypto_destroyreq(crp);
	} else
		error = ktls_ocf_dispatch_async(state, crp);
	return (error);
}

static int
ktls_ocf_tls13_aead_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
	struct tls_aead_data_13 ad;
	struct cryptop crp;
	struct ktls_ocf_session *os;
	int error;
	u_int tag_len;
	uint16_t tls_len;

	os = tls->ocf_session;

	tag_len = tls->params.tls_tlen - 1;

	/* Payload must contain at least one byte for the record type. */
	tls_len = ntohs(hdr->tls_length);
	if (tls_len < tag_len + 1)
		return (EMSGSIZE);

	crypto_initreq(&crp, os->sid);

	/* Setup the nonce. */
	memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
	*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);

	/* Setup the AAD. */
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = hdr->tls_length;
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = tls_len - tag_len;
	crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;

	crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls13_gcm_decrypts, 1);
	else
		counter_u64_add(ocf_tls13_chacha20_decrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	*trailer_len = tag_len;
	return (error);
}

static int
ktls_ocf_tls13_aead_recrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m,
    uint64_t seqno)
{
	struct cryptop crp;
	struct ktls_ocf_session *os;
	char *buf;
	u_int payload_len;
	int error;
	uint16_t tls_len;

	os = tls->ocf_session;

	/* Payload must contain at least one byte for the record type. */
	tls_len = ntohs(hdr->tls_length);
	if (tls_len < AES_GMAC_HASH_LEN + 1)
		return (EMSGSIZE);

	crypto_initreq(&crp, os->recrypt_sid);

	KASSERT(tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16,
	    ("%s: only AES-GCM is supported", __func__));

	/* Setup the IV. */
	memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
	*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
	be32enc(crp.crp_iv + 12, 2);

	payload_len = tls_len - AES_GMAC_HASH_LEN;
	crp.crp_op = CRYPTO_OP_ENCRYPT;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);
	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = payload_len;

	buf = malloc(payload_len, M_KTLS_OCF, M_WAITOK);
	crypto_use_output_buf(&crp, buf, payload_len);

	counter_u64_add(ocf_tls13_gcm_recrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);

	if (error == 0)
		ktls_ocf_recrypt_fixup(m, tls->params.tls_hlen, payload_len,
		    buf);

	free(buf, M_KTLS_OCF);
	return (error);
}

static const struct ktls_ocf_sw ktls_ocf_tls13_aead_sw = {
	.encrypt = ktls_ocf_tls13_aead_encrypt,
	.recrypt = ktls_ocf_tls13_aead_recrypt,
	.decrypt = ktls_ocf_tls13_aead_decrypt,
};

void
ktls_ocf_free(struct ktls_session *tls)
{
	struct ktls_ocf_session *os;

	os = tls->ocf_session;
	crypto_freesession(os->sid);
	crypto_freesession(os->mac_sid);
	crypto_freesession(os->recrypt_sid);
	mtx_destroy(&os->lock);
	zfree(os, M_KTLS_OCF);
}

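/*
 * Attempt to create the OCF state for a KTLS session: validate the
 * cipher suite and TLS version, build the crypto session parameters,
 * create the OCF sessions (cipher, plus a digest session for CBC
 * suites and an AES-CTR "recrypt" session for AES-GCM), and select
 * the matching ktls_ocf_sw dispatch table.
 */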
int
ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
{
	struct crypto_session_params csp, mac_csp, recrypt_csp;
	struct ktls_ocf_session *os;
	int error, mac_len;

	memset(&csp, 0, sizeof(csp));
	memset(&mac_csp, 0, sizeof(mac_csp));
	mac_csp.csp_mode = CSP_MODE_NONE;
	mac_len = 0;
	memset(&recrypt_csp, 0, sizeof(recrypt_csp));
	recrypt_csp.csp_mode = CSP_MODE_NONE;

	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.2 and 1.3 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
		csp.csp_mode = CSP_MODE_AEAD;
		csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = AES_GCM_IV_LEN;

		recrypt_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		recrypt_csp.csp_mode = CSP_MODE_CIPHER;
		recrypt_csp.csp_cipher_alg = CRYPTO_AES_ICM;
		recrypt_csp.csp_cipher_key = tls->params.cipher_key;
		recrypt_csp.csp_cipher_klen = tls->params.cipher_key_len;
		recrypt_csp.csp_ivlen = AES_BLOCK_LEN;
		break;
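	/*
	 * TLS CBC suites are MAC-then-encrypt, which OCF's combined
	 * ETA mode (encrypt-then-authenticate) cannot express, so the
	 * HMAC is computed with a separate digest session.
	 */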
	case CRYPTO_AES_CBC:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		switch (tls->params.auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			mac_len = SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256_HMAC:
			mac_len = SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384_HMAC:
			mac_len = SHA2_384_HASH_LEN;
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.0-1.2 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_ZERO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_TWO)
			return (EPROTONOSUPPORT);

		/* AES-CBC is not supported for receive for TLS 1.0. */
		if (direction == KTLS_RX &&
		    tls->params.tls_vminor == TLS_MINOR_VER_ZERO)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		csp.csp_mode = CSP_MODE_CIPHER;
		csp.csp_cipher_alg = CRYPTO_AES_CBC;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = AES_BLOCK_LEN;

		mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		mac_csp.csp_mode = CSP_MODE_DIGEST;
		mac_csp.csp_auth_alg = tls->params.auth_algorithm;
		mac_csp.csp_auth_key = tls->params.auth_key;
		mac_csp.csp_auth_klen = tls->params.auth_key_len;
		break;
	case CRYPTO_CHACHA20_POLY1305:
		switch (tls->params.cipher_key_len) {
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.2 and 1.3 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
		csp.csp_mode = CSP_MODE_AEAD;
		csp.csp_cipher_alg = CRYPTO_CHACHA20_POLY1305;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = CHACHA20_POLY1305_IV_LEN;
		break;
	default:
		return (EPROTONOSUPPORT);
	}

	os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO);
	if (os == NULL)
		return (ENOMEM);

	error = crypto_newsession(&os->sid, &csp,
	    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
	if (error) {
		free(os, M_KTLS_OCF);
		return (error);
	}

	if (mac_csp.csp_mode != CSP_MODE_NONE) {
		error = crypto_newsession(&os->mac_sid, &mac_csp,
		    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
		if (error) {
			crypto_freesession(os->sid);
			free(os, M_KTLS_OCF);
			return (error);
		}
		os->mac_len = mac_len;
	}

	if (recrypt_csp.csp_mode != CSP_MODE_NONE) {
		error = crypto_newsession(&os->recrypt_sid, &recrypt_csp,
		    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
		if (error) {
			crypto_freesession(os->sid);
			free(os, M_KTLS_OCF);
			return (error);
		}
	}

	mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
	tls->ocf_session = os;
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 ||
	    tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) {
		if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
			os->sw = &ktls_ocf_tls13_aead_sw;
		else
			os->sw = &ktls_ocf_tls12_aead_sw;
	} else {
		os->sw = &ktls_ocf_tls_cbc_sw;
		if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) {
			os->implicit_iv = true;
			memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
#ifdef INVARIANTS
			os->next_seqno = tls->next_seqno;
#endif
		}
	}

	/*
	 * AES-CBC is always synchronous currently.  Asynchronous
	 * operation would require multiple callbacks and an additional
	 * iovec array in ktls_ocf_encrypt_state.
	 */
	tls->sync_dispatch = CRYPTO_SESS_SYNC(os->sid) ||
	    tls->params.cipher_algorithm == CRYPTO_AES_CBC;
	return (0);
}

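/*
 * Entry points called from the generic KTLS code.  Each simply
 * forwards to the per-session ktls_ocf_sw dispatch table selected in
 * ktls_ocf_try().
 */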
int
ktls_ocf_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	return (tls->ocf_session->sw->encrypt(state, tls, m, outiov,
	    outiovcnt));
}

int
ktls_ocf_decrypt(struct ktls_session *tls, const struct tls_record_layer *hdr,
    struct mbuf *m, uint64_t seqno, int *trailer_len)
{
	return (tls->ocf_session->sw->decrypt(tls, hdr, m, seqno, trailer_len));
}

int
ktls_ocf_recrypt(struct ktls_session *tls, const struct tls_record_layer *hdr,
    struct mbuf *m, uint64_t seqno)
{
	return (tls->ocf_session->sw->recrypt(tls, hdr, m, seqno));
}

bool
ktls_ocf_recrypt_supported(struct ktls_session *tls)
{
	return (tls->ocf_session->sw->recrypt != NULL &&
	    tls->ocf_session->recrypt_sid != NULL);
}