/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <netinet/in.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/ktls.h>
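
/*
 * A method table describing an implementation of a TLS record format.
 * One table exists for each supported record format: AES-CBC
 * (MAC-then-encrypt), TLS 1.2 AEAD, and TLS 1.3 AEAD.  ktls_ocf_try()
 * selects the table to use for a session.
 */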
struct ktls_ocf_sw {
	/* Encrypt a single outbound TLS record. */
	int	(*encrypt)(struct ktls_ocf_encrypt_state *state,
	    struct ktls_session *tls, struct mbuf *m,
	    struct iovec *outiov, int outiovcnt);

	/* Re-encrypt a received TLS record that is partially decrypted. */
	int	(*recrypt)(struct ktls_session *tls,
	    const struct tls_record_layer *hdr, struct mbuf *m,
	    uint64_t seqno);

	/* Decrypt a received TLS record. */
	int	(*decrypt)(struct ktls_session *tls,
	    const struct tls_record_layer *hdr, struct mbuf *m,
	    uint64_t seqno, int *trailer_len);
};

struct ktls_ocf_session {
	const struct ktls_ocf_sw *sw;
	crypto_session_t sid;
	crypto_session_t mac_sid;
	crypto_session_t recrypt_sid;
	struct mtx lock;
	int mac_len;
	bool implicit_iv;

	/* Only used for TLS 1.0 with the implicit IV. */
#ifdef INVARIANTS
	bool in_progress;
	uint64_t next_seqno;
#endif
	char iv[AES_BLOCK_LEN];
};

struct ocf_operation {
	struct ktls_ocf_session *os;
	bool done;
};

static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");

SYSCTL_DECL(_kern_ipc_tls);
SYSCTL_DECL(_kern_ipc_tls_stats);

static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload via OCF stats");

static COUNTER_U64_DEFINE_EARLY(ocf_tls10_cbc_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_encrypts,
    CTLFLAG_RD, &ocf_tls10_cbc_encrypts,
    "Total number of OCF TLS 1.0 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_decrypts,
    CTLFLAG_RD, &ocf_tls11_cbc_decrypts,
    "Total number of OCF TLS 1.1/1.2 CBC decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_encrypts,
    CTLFLAG_RD, &ocf_tls11_cbc_encrypts,
    "Total number of OCF TLS 1.1/1.2 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_decrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_decrypts,
    "Total number of OCF TLS 1.2 GCM decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_encrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_encrypts,
    "Total number of OCF TLS 1.2 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_recrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_recrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_recrypts,
    "Total number of OCF TLS 1.2 GCM re-encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_decrypts,
    CTLFLAG_RD, &ocf_tls12_chacha20_decrypts,
    "Total number of OCF TLS 1.2 Chacha20-Poly1305 decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_encrypts,
    CTLFLAG_RD, &ocf_tls12_chacha20_encrypts,
    "Total number of OCF TLS 1.2 Chacha20-Poly1305 encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_decrypts,
    CTLFLAG_RD, &ocf_tls13_gcm_decrypts,
    "Total number of OCF TLS 1.3 GCM decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_encrypts,
    CTLFLAG_RD, &ocf_tls13_gcm_encrypts,
    "Total number of OCF TLS 1.3 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_recrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_recrypts,
    CTLFLAG_RD, &ocf_tls13_gcm_recrypts,
    "Total number of OCF TLS 1.3 GCM re-encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_decrypts,
    CTLFLAG_RD, &ocf_tls13_chacha20_decrypts,
    "Total number of OCF TLS 1.3 Chacha20-Poly1305 decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_encrypts,
    CTLFLAG_RD, &ocf_tls13_chacha20_encrypts,
    "Total number of OCF TLS 1.3 Chacha20-Poly1305 encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_inplace);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
    CTLFLAG_RD, &ocf_inplace,
    "Total number of OCF in-place operations");

static COUNTER_U64_DEFINE_EARLY(ocf_separate_output);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
    CTLFLAG_RD, &ocf_separate_output,
    "Total number of OCF operations with a separate output buffer");

static COUNTER_U64_DEFINE_EARLY(ocf_retries);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
    &ocf_retries,
    "Number of OCF encryption operation retries");
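
/*
 * OCF completion callbacks.  A session backed by a synchronous
 * software driver completes before crypto_dispatch() returns, so the
 * synchronous callback is a no-op.  An asynchronous driver completes
 * from a separate context, so the asynchronous callback records
 * completion and wakes the thread sleeping in ktls_ocf_dispatch().
 */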
static int
ktls_ocf_callback_sync(struct cryptop *crp __unused)
{
	return (0);
}

static int
ktls_ocf_callback_async(struct cryptop *crp)
{
	struct ocf_operation *oo;

	oo = crp->crp_opaque;
	mtx_lock(&oo->os->lock);
	oo->done = true;
	mtx_unlock(&oo->os->lock);
	wakeup(oo);
	return (0);
}

static int
ktls_ocf_dispatch(struct ktls_ocf_session *os, struct cryptop *crp)
{
	struct ocf_operation oo;
	int error;
	bool async;

	oo.os = os;
	oo.done = false;

	crp->crp_opaque = &oo;
	for (;;) {
		async = !CRYPTO_SESS_SYNC(crp->crp_session);
		crp->crp_callback = async ? ktls_ocf_callback_async :
		    ktls_ocf_callback_sync;

		error = crypto_dispatch(crp);
		if (error)
			break;
		if (async) {
			mtx_lock(&os->lock);
			while (!oo.done)
				mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
			mtx_unlock(&os->lock);
		}

		if (crp->crp_etype != EAGAIN) {
			error = crp->crp_etype;
			break;
		}

		crp->crp_etype = 0;
		oo.done = false;
		counter_u64_add(ocf_retries, 1);
	}
	return (error);
}

static int
ktls_ocf_dispatch_async_cb(struct cryptop *crp)
{
	struct ktls_ocf_encrypt_state *state;
	int error;

	state = crp->crp_opaque;
	if (crp->crp_etype == EAGAIN) {
		crp->crp_etype = 0;
		counter_u64_add(ocf_retries, 1);
		error = crypto_dispatch(crp);
		if (error != 0) {
			crypto_destroyreq(crp);
			ktls_encrypt_cb(state, error);
		}
		return (0);
	}

	error = crp->crp_etype;
	crypto_destroyreq(crp);
	ktls_encrypt_cb(state, error);
	return (0);
}

static int
ktls_ocf_dispatch_async(struct ktls_ocf_encrypt_state *state,
    struct cryptop *crp)
{
	int error;

	crp->crp_opaque = state;
	crp->crp_callback = ktls_ocf_dispatch_async_cb;
	error = crypto_dispatch(crp);
	if (error != 0)
		crypto_destroyreq(crp);
	return (error);
}
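
/*
 * AES-CBC records use MAC-then-encrypt: a MAC is computed over a
 * pseudo-header and the plaintext, padding is appended to reach a
 * multiple of the cipher block size, and everything after the header
 * (and explicit IV, if any) is then CBC-encrypted:
 *
 *	header | explicit IV (TLS 1.1+) | plaintext | MAC | padding
 *	                                |<---- CBC-encrypted ------>|
 *
 * TLS 1.0 omits the explicit IV and instead chains the IV from the
 * final ciphertext block of the previous record, so records must be
 * encrypted in sequence (enforced below under INVARIANTS).
 */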
static int
ktls_ocf_tls_cbc_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	const struct tls_record_layer *hdr;
	struct uio *uio;
	struct tls_mac_data *ad;
	struct cryptop *crp;
	struct ktls_ocf_session *os;
	struct iovec iov[m->m_epg_npgs + 2];
	u_int pgoff;
	int i, error;
	uint16_t tls_comp_len;
	uint8_t pad;

	MPASS(outiovcnt + 1 <= nitems(iov));

	os = tls->ocf_session;
	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
	crp = &state->crp;
	uio = &state->uio;
	MPASS(tls->sync_dispatch);

#ifdef INVARIANTS
	if (os->implicit_iv) {
		mtx_lock(&os->lock);
		KASSERT(!os->in_progress,
		    ("concurrent implicit IV encryptions"));
		if (os->next_seqno != m->m_epg_seqno) {
			printf("KTLS CBC: TLS records out of order.  "
			    "Expected %ju, got %ju\n",
			    (uintmax_t)os->next_seqno,
			    (uintmax_t)m->m_epg_seqno);
			mtx_unlock(&os->lock);
			return (EINVAL);
		}
		os->in_progress = true;
		mtx_unlock(&os->lock);
	}
#endif

	/* Payload length. */
	tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);

	/* Initialize the AAD. */
	ad = &state->mac;
	ad->seq = htobe64(m->m_epg_seqno);
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = htons(tls_comp_len);

	/* First, compute the MAC. */
	iov[0].iov_base = ad;
	iov[0].iov_len = sizeof(*ad);
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs; i++, pgoff = 0) {
		iov[i + 1].iov_base = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] +
		    pgoff);
		iov[i + 1].iov_len = m_epg_pagelen(m, i, pgoff);
	}
	iov[m->m_epg_npgs + 1].iov_base = m->m_epg_trail;
	iov[m->m_epg_npgs + 1].iov_len = os->mac_len;
	uio->uio_iov = iov;
	uio->uio_iovcnt = m->m_epg_npgs + 2;
	uio->uio_offset = 0;
	uio->uio_segflg = UIO_SYSSPACE;
	uio->uio_td = curthread;
	uio->uio_resid = sizeof(*ad) + tls_comp_len + os->mac_len;

	crypto_initreq(crp, os->mac_sid);
	crp->crp_payload_start = 0;
	crp->crp_payload_length = sizeof(*ad) + tls_comp_len;
	crp->crp_digest_start = crp->crp_payload_length;
	crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM;
	crypto_use_uio(crp, uio);
	error = ktls_ocf_dispatch(os, crp);

	crypto_destroyreq(crp);
	if (error) {
#ifdef INVARIANTS
		if (os->implicit_iv) {
			mtx_lock(&os->lock);
			os->in_progress = false;
			mtx_unlock(&os->lock);
		}
#endif
		return (error);
	}

	/* Second, add the padding. */
	pad = m->m_epg_trllen - os->mac_len - 1;
	for (i = 0; i < pad + 1; i++)
		m->m_epg_trail[os->mac_len + i] = pad;

	/* Finally, encrypt the record. */
	crypto_initreq(crp, os->sid);
	crp->crp_payload_start = m->m_epg_hdrlen;
	crp->crp_payload_length = tls_comp_len + m->m_epg_trllen;
	KASSERT(crp->crp_payload_length % AES_BLOCK_LEN == 0,
	    ("invalid encryption size"));
	crypto_use_single_mbuf(crp, m);
	crp->crp_op = CRYPTO_OP_ENCRYPT;
	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	if (os->implicit_iv)
		memcpy(crp->crp_iv, os->iv, AES_BLOCK_LEN);
	else
		memcpy(crp->crp_iv, hdr + 1, AES_BLOCK_LEN);

	if (outiov != NULL) {
		uio->uio_iov = outiov;
		uio->uio_iovcnt = outiovcnt;
		uio->uio_offset = 0;
		uio->uio_segflg = UIO_SYSSPACE;
		uio->uio_td = curthread;
		uio->uio_resid = crp->crp_payload_length;
		crypto_use_output_uio(crp, uio);
	}

	if (os->implicit_iv)
		counter_u64_add(ocf_tls10_cbc_encrypts, 1);
	else
		counter_u64_add(ocf_tls11_cbc_encrypts, 1);
	if (outiov != NULL)
		counter_u64_add(ocf_separate_output, 1);
	else
		counter_u64_add(ocf_inplace, 1);
	error = ktls_ocf_dispatch(os, crp);

	crypto_destroyreq(crp);

	if (os->implicit_iv) {
		KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
		    ("trailer too short to read IV"));
		memcpy(os->iv, m->m_epg_trail + m->m_epg_trllen - AES_BLOCK_LEN,
		    AES_BLOCK_LEN);
#ifdef INVARIANTS
		mtx_lock(&os->lock);
		os->next_seqno = m->m_epg_seqno + 1;
		os->in_progress = false;
		mtx_unlock(&os->lock);
#endif
	}
	return (error);
}
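
/*
 * m_apply() callback used to verify CBC padding.  TLS requires the
 * final padding-length byte and every padding byte before it to hold
 * the same value, e.g. a record with pad == 3 must end in the four
 * bytes 03 03 03 03.
 */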
static int
check_padding(void *arg, void *data, u_int len)
{
	uint8_t pad = *(uint8_t *)arg;
	const char *cp = data;

	while (len > 0) {
		if (*cp != pad)
			return (EBADMSG);
		cp++;
		len--;
	}
	return (0);
}

static int
ktls_ocf_tls_cbc_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
	struct tls_mac_data ad;
	struct cryptop crp;
	struct uio uio;
	struct ktls_ocf_session *os;
	struct iovec *iov;
	struct mbuf *n;
	u_int iovcnt;
	int i, error, skip;
	uint16_t tls_len, tls_comp_len;
	uint8_t pad;

	os = tls->ocf_session;

	/*
	 * Ensure record is a multiple of the cipher block size and
	 * contains at least an explicit IV, MAC, and at least one
	 * padding byte.
	 */
	tls_len = ntohs(hdr->tls_length);
	if (tls_len % AES_BLOCK_LEN != 0 ||
	    tls_len < AES_BLOCK_LEN + roundup2(os->mac_len + 1, AES_BLOCK_LEN))
		return (EMSGSIZE);

	/* First, decrypt the record. */
	crypto_initreq(&crp, os->sid);
	crp.crp_iv_start = sizeof(*hdr);
	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = tls_len - AES_BLOCK_LEN;
	crypto_use_mbuf(&crp, m);
	crp.crp_op = CRYPTO_OP_DECRYPT;
	crp.crp_flags = CRYPTO_F_CBIMM;

	counter_u64_add(ocf_tls11_cbc_decrypts, 1);

	error = ktls_ocf_dispatch(os, &crp);
	crypto_destroyreq(&crp);
	if (error)
		return (error);

	/* Verify the padding. */
	m_copydata(m, sizeof(*hdr) + tls_len - 1, 1, &pad);
	*trailer_len = os->mac_len + pad + 1;
	if (AES_BLOCK_LEN + *trailer_len > tls_len)
		return (EBADMSG);
	error = m_apply(m, sizeof(*hdr) + tls_len - (pad + 1), pad + 1,
	    check_padding, &pad);
	if (error)
		return (error);

	/* Verify the MAC. */
	tls_comp_len = tls_len - (AES_BLOCK_LEN + *trailer_len);
	memset(&uio, 0, sizeof(uio));

	/*
	 * Allocate and populate the iov.  Have to skip over the TLS
	 * header in 'm' as it is not part of the MAC input.
	 */
	iovcnt = 1;
	for (n = m; n != NULL; n = n->m_next)
		iovcnt++;
	iov = malloc(iovcnt * sizeof(*iov), M_KTLS_OCF, M_WAITOK);
	iov[0].iov_base = &ad;
	iov[0].iov_len = sizeof(ad);
	skip = sizeof(*hdr) + AES_BLOCK_LEN;
	for (i = 1, n = m; n != NULL; i++, n = n->m_next) {
		if (n->m_len < skip) {
			skip -= n->m_len;
			continue;
		}
		iov[i].iov_base = mtod(n, char *) + skip;
		iov[i].iov_len = n->m_len - skip;
		skip = 0;
	}
	uio.uio_iov = iov;
	uio.uio_iovcnt = i;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_td = curthread;
	uio.uio_resid = sizeof(ad) + tls_len - AES_BLOCK_LEN;

	/* Initialize the AAD. */
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);

	crypto_initreq(&crp, os->mac_sid);
	crp.crp_payload_start = 0;
	crp.crp_payload_length = sizeof(ad) + tls_comp_len;
	crp.crp_digest_start = crp.crp_payload_length;
	crp.crp_op = CRYPTO_OP_VERIFY_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM;
	crypto_use_uio(&crp, &uio);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	free(iov, M_KTLS_OCF);
	return (error);
}

static const struct ktls_ocf_sw ktls_ocf_tls_cbc_sw = {
	.encrypt = ktls_ocf_tls_cbc_encrypt,
	.decrypt = ktls_ocf_tls_cbc_decrypt
};
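
/*
 * TLS 1.2 AEAD nonces are constructed per RFC 5288 (AES-GCM) and
 * RFC 7905 (Chacha20-Poly1305).  For AES-GCM the 12-byte nonce is the
 * 4-byte implicit salt from the key block followed by the 8-byte
 * explicit nonce carried after the record header.  For
 * Chacha20-Poly1305 the 12-byte static IV is XORed with the 64-bit
 * record sequence number, matching the TLS 1.3 construction.
 */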
static int
ktls_ocf_tls12_aead_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	const struct tls_record_layer *hdr;
	struct uio *uio;
	struct tls_aead_data *ad;
	struct cryptop *crp;
	struct ktls_ocf_session *os;
	int error;
	uint16_t tls_comp_len;

	os = tls->ocf_session;
	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
	crp = &state->crp;
	uio = &state->uio;

	crypto_initreq(crp, os->sid);

	/* Setup the IV. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
		memcpy(crp->crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
		memcpy(crp->crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
		    sizeof(uint64_t));
	} else {
		/*
		 * Chacha20-Poly1305 constructs the IV for TLS 1.2
		 * identically to constructing the IV for AEAD in TLS
		 * 1.3.
		 */
		memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
		*(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);
	}

	/* Setup the AAD. */
	ad = &state->aead;
	tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
	ad->seq = htobe64(m->m_epg_seqno);
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = htons(tls_comp_len);
	crp->crp_aad = ad;
	crp->crp_aad_length = sizeof(*ad);

	/* Set fields for input payload. */
	crypto_use_single_mbuf(crp, m);
	crp->crp_payload_start = m->m_epg_hdrlen;
	crp->crp_payload_length = tls_comp_len;

	if (outiov != NULL) {
		crp->crp_digest_start = crp->crp_payload_length;

		uio->uio_iov = outiov;
		uio->uio_iovcnt = outiovcnt;
		uio->uio_offset = 0;
		uio->uio_segflg = UIO_SYSSPACE;
		uio->uio_td = curthread;
		uio->uio_resid = crp->crp_payload_length + tls->params.tls_tlen;
		crypto_use_output_uio(crp, uio);
	} else
		crp->crp_digest_start = crp->crp_payload_start +
		    crp->crp_payload_length;

	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls12_gcm_encrypts, 1);
	else
		counter_u64_add(ocf_tls12_chacha20_encrypts, 1);
	if (outiov != NULL)
		counter_u64_add(ocf_separate_output, 1);
	else
		counter_u64_add(ocf_inplace, 1);
	if (tls->sync_dispatch) {
		error = ktls_ocf_dispatch(os, crp);
		crypto_destroyreq(crp);
	} else
		error = ktls_ocf_dispatch_async(state, crp);
	return (error);
}

static int
ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
	struct tls_aead_data ad;
	struct cryptop crp;
	struct ktls_ocf_session *os;
	int error;
	uint16_t tls_comp_len, tls_len;

	os = tls->ocf_session;

	/* Ensure record contains at least an explicit IV and tag. */
	tls_len = ntohs(hdr->tls_length);
	if (tls_len + sizeof(*hdr) < tls->params.tls_hlen +
	    tls->params.tls_tlen)
		return (EMSGSIZE);

	crypto_initreq(&crp, os->sid);

	/* Setup the IV. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
		memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
		memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
		    sizeof(uint64_t));
	} else {
		/*
		 * Chacha20-Poly1305 constructs the IV for TLS 1.2
		 * identically to constructing the IV for AEAD in TLS
		 * 1.3.
		 */
		memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
		*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
	}

	/* Setup the AAD. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		tls_comp_len = tls_len -
		    (AES_GMAC_HASH_LEN + sizeof(uint64_t));
	else
		tls_comp_len = tls_len - POLY1305_HASH_LEN;
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = tls_comp_len;
	crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;

	crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls12_gcm_decrypts, 1);
	else
		counter_u64_add(ocf_tls12_chacha20_decrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	*trailer_len = tls->params.tls_tlen;
	return (error);
}
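
/*
 * Recrypt support handles records received via a NIC performing
 * inline TLS decryption when the NIC was unable to decrypt some (or
 * all) of a record, e.g. while resynchronizing its state after packet
 * loss.  The payload is run through AES-CTR using the same key with
 * the block counter starting at 2 (with a 96-bit IV, GCM reserves
 * counter 1 for the authentication tag and encrypts payload blocks
 * starting at counter 2).  This regenerates the original ciphertext,
 * and ktls_ocf_recrypt_fixup() copies it back over only the mbufs
 * marked M_DECRYPTED, after which the whole record can be decrypted
 * and verified normally.
 */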

/*
 * Reconstruct encrypted mbuf data in input buffer.
 */
static void
ktls_ocf_recrypt_fixup(struct mbuf *m, u_int skip, u_int len, char *buf)
{
	const char *src = buf;
	u_int todo;

	while (skip >= m->m_len) {
		skip -= m->m_len;
		m = m->m_next;
	}

	while (len > 0) {
		todo = m->m_len - skip;
		if (todo > len)
			todo = len;

		if (m->m_flags & M_DECRYPTED)
			memcpy(mtod(m, char *) + skip, src, todo);
		src += todo;
		len -= todo;
		skip = 0;
		m = m->m_next;
	}
}

static int
ktls_ocf_tls12_aead_recrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m,
    uint64_t seqno)
{
	struct cryptop crp;
	struct ktls_ocf_session *os;
	char *buf;
	u_int payload_len;
	int error;
	uint16_t tls_len;

	os = tls->ocf_session;

	/* Ensure record contains at least an explicit IV and tag. */
	tls_len = ntohs(hdr->tls_length);
	if (tls_len < sizeof(uint64_t) + AES_GMAC_HASH_LEN)
		return (EMSGSIZE);

	crypto_initreq(&crp, os->recrypt_sid);

	KASSERT(tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16,
	    ("%s: only AES-GCM is supported", __func__));

	/* Setup the IV. */
	memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
	memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
	be32enc(crp.crp_iv + AES_GCM_IV_LEN, 2);

	payload_len = tls_len - (AES_GMAC_HASH_LEN + sizeof(uint64_t));
	crp.crp_op = CRYPTO_OP_ENCRYPT;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);
	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = payload_len;

	buf = malloc(payload_len, M_KTLS_OCF, M_WAITOK);
	crypto_use_output_buf(&crp, buf, payload_len);

	counter_u64_add(ocf_tls12_gcm_recrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);

	if (error == 0)
		ktls_ocf_recrypt_fixup(m, tls->params.tls_hlen, payload_len,
		    buf);

	free(buf, M_KTLS_OCF);
	return (error);
}

static const struct ktls_ocf_sw ktls_ocf_tls12_aead_sw = {
	.encrypt = ktls_ocf_tls12_aead_encrypt,
	.recrypt = ktls_ocf_tls12_aead_recrypt,
	.decrypt = ktls_ocf_tls12_aead_decrypt,
};
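
/*
 * TLS 1.3 records (RFC 8446) append the real record type to the
 * plaintext as a one-byte trailer and encrypt both under the AEAD,
 * using the 5-byte record header itself as the AAD.  The nonce is the
 * static IV XORed with the 64-bit record sequence number.
 */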
static int
ktls_ocf_tls13_aead_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	const struct tls_record_layer *hdr;
	struct uio *uio;
	struct tls_aead_data_13 *ad;
	struct cryptop *crp;
	struct ktls_ocf_session *os;
	int error;

	os = tls->ocf_session;
	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
	crp = &state->crp;
	uio = &state->uio;

	crypto_initreq(crp, os->sid);

	/* Setup the nonce. */
	memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
	*(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);

	/* Setup the AAD. */
	ad = &state->aead13;
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = hdr->tls_length;
	crp->crp_aad = ad;
	crp->crp_aad_length = sizeof(*ad);

	/* Set fields for input payload. */
	crypto_use_single_mbuf(crp, m);
	crp->crp_payload_start = m->m_epg_hdrlen;
	crp->crp_payload_length = m->m_len -
	    (m->m_epg_hdrlen + m->m_epg_trllen);

	/* Store the record type as the first byte of the trailer. */
	m->m_epg_trail[0] = m->m_epg_record_type;
	crp->crp_payload_length++;

	if (outiov != NULL) {
		crp->crp_digest_start = crp->crp_payload_length;

		uio->uio_iov = outiov;
		uio->uio_iovcnt = outiovcnt;
		uio->uio_offset = 0;
		uio->uio_segflg = UIO_SYSSPACE;
		uio->uio_td = curthread;
		uio->uio_resid = m->m_len - m->m_epg_hdrlen;
		crypto_use_output_uio(crp, uio);
	} else
		crp->crp_digest_start = crp->crp_payload_start +
		    crp->crp_payload_length;

	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls13_gcm_encrypts, 1);
	else
		counter_u64_add(ocf_tls13_chacha20_encrypts, 1);
	if (outiov != NULL)
		counter_u64_add(ocf_separate_output, 1);
	else
		counter_u64_add(ocf_inplace, 1);
	if (tls->sync_dispatch) {
		error = ktls_ocf_dispatch(os, crp);
		crypto_destroyreq(crp);
	} else
		error = ktls_ocf_dispatch_async(state, crp);
	return (error);
}
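
/*
 * Note that for TLS 1.3, tls_tlen covers the hidden record type byte
 * in addition to the AEAD tag, so the tag itself is tls_tlen - 1
 * bytes long.
 */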
static int
ktls_ocf_tls13_aead_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
	struct tls_aead_data_13 ad;
	struct cryptop crp;
	struct ktls_ocf_session *os;
	int error;
	u_int tag_len;
	uint16_t tls_len;

	os = tls->ocf_session;

	tag_len = tls->params.tls_tlen - 1;

	/* Payload must contain at least one byte for the record type. */
	tls_len = ntohs(hdr->tls_length);
	if (tls_len < tag_len + 1)
		return (EMSGSIZE);

	crypto_initreq(&crp, os->sid);

	/* Setup the nonce. */
	memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
	*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);

	/* Setup the AAD. */
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = hdr->tls_length;
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = tls_len - tag_len;
	crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;

	crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls13_gcm_decrypts, 1);
	else
		counter_u64_add(ocf_tls13_chacha20_decrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	*trailer_len = tag_len;
	return (error);
}

static int
ktls_ocf_tls13_aead_recrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m,
    uint64_t seqno)
{
	struct cryptop crp;
	struct ktls_ocf_session *os;
	char *buf;
	u_int payload_len;
	int error;
	uint16_t tls_len;

	os = tls->ocf_session;

	/* Payload must contain at least one byte for the record type. */
	tls_len = ntohs(hdr->tls_length);
	if (tls_len < AES_GMAC_HASH_LEN + 1)
		return (EMSGSIZE);

	crypto_initreq(&crp, os->recrypt_sid);

	KASSERT(tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16,
	    ("%s: only AES-GCM is supported", __func__));

	/* Setup the IV. */
	memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
	*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
	be32enc(crp.crp_iv + AES_GCM_IV_LEN, 2);

	payload_len = tls_len - AES_GMAC_HASH_LEN;
	crp.crp_op = CRYPTO_OP_ENCRYPT;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);
	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = payload_len;

	buf = malloc(payload_len, M_KTLS_OCF, M_WAITOK);
	crypto_use_output_buf(&crp, buf, payload_len);

	counter_u64_add(ocf_tls13_gcm_recrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);

	if (error == 0)
		ktls_ocf_recrypt_fixup(m, tls->params.tls_hlen, payload_len,
		    buf);

	free(buf, M_KTLS_OCF);
	return (error);
}

static const struct ktls_ocf_sw ktls_ocf_tls13_aead_sw = {
	.encrypt = ktls_ocf_tls13_aead_encrypt,
	.recrypt = ktls_ocf_tls13_aead_recrypt,
	.decrypt = ktls_ocf_tls13_aead_decrypt,
};

void
ktls_ocf_free(struct ktls_session *tls)
{
	struct ktls_ocf_session *os;

	os = tls->ocf_session;
	crypto_freesession(os->sid);
	crypto_freesession(os->mac_sid);
	crypto_freesession(os->recrypt_sid);
	mtx_destroy(&os->lock);
	zfree(os, M_KTLS_OCF);
}
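
/*
 * Probe routine invoked when a socket enables kernel TLS: validate
 * the requested cipher suite and TLS version, create the OCF
 * session(s), and select the method table for the session.
 */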
int
ktls_ocf_try(struct ktls_session *tls, int direction)
{
	struct crypto_session_params csp, mac_csp, recrypt_csp;
	struct ktls_ocf_session *os;
	int error, mac_len;

	memset(&csp, 0, sizeof(csp));
	memset(&mac_csp, 0, sizeof(mac_csp));
	mac_csp.csp_mode = CSP_MODE_NONE;
	mac_len = 0;
	memset(&recrypt_csp, 0, sizeof(recrypt_csp));
	recrypt_csp.csp_mode = CSP_MODE_NONE;

	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.2 and 1.3 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
		csp.csp_mode = CSP_MODE_AEAD;
		csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = AES_GCM_IV_LEN;

		recrypt_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		recrypt_csp.csp_mode = CSP_MODE_CIPHER;
		recrypt_csp.csp_cipher_alg = CRYPTO_AES_ICM;
		recrypt_csp.csp_cipher_key = tls->params.cipher_key;
		recrypt_csp.csp_cipher_klen = tls->params.cipher_key_len;
		recrypt_csp.csp_ivlen = AES_BLOCK_LEN;
		break;
	case CRYPTO_AES_CBC:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		switch (tls->params.auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			mac_len = SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256_HMAC:
			mac_len = SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384_HMAC:
			mac_len = SHA2_384_HASH_LEN;
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.0-1.2 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_ZERO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_TWO)
			return (EPROTONOSUPPORT);

		/* AES-CBC is not supported for receive for TLS 1.0. */
		if (direction == KTLS_RX &&
		    tls->params.tls_vminor == TLS_MINOR_VER_ZERO)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		csp.csp_mode = CSP_MODE_CIPHER;
		csp.csp_cipher_alg = CRYPTO_AES_CBC;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = AES_BLOCK_LEN;

		mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		mac_csp.csp_mode = CSP_MODE_DIGEST;
		mac_csp.csp_auth_alg = tls->params.auth_algorithm;
		mac_csp.csp_auth_key = tls->params.auth_key;
		mac_csp.csp_auth_klen = tls->params.auth_key_len;
		break;
	case CRYPTO_CHACHA20_POLY1305:
		switch (tls->params.cipher_key_len) {
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.2 and 1.3 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
		csp.csp_mode = CSP_MODE_AEAD;
		csp.csp_cipher_alg = CRYPTO_CHACHA20_POLY1305;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = CHACHA20_POLY1305_IV_LEN;
		break;
	default:
		return (EPROTONOSUPPORT);
	}

	os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO);
	if (os == NULL)
		return (ENOMEM);

	error = crypto_newsession(&os->sid, &csp,
	    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
	if (error) {
		free(os, M_KTLS_OCF);
		return (error);
	}

	if (mac_csp.csp_mode != CSP_MODE_NONE) {
		error = crypto_newsession(&os->mac_sid, &mac_csp,
		    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
		if (error) {
			crypto_freesession(os->sid);
			free(os, M_KTLS_OCF);
			return (error);
		}
		os->mac_len = mac_len;
	}

	if (recrypt_csp.csp_mode != CSP_MODE_NONE) {
		error = crypto_newsession(&os->recrypt_sid, &recrypt_csp,
		    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
		if (error) {
			crypto_freesession(os->sid);
			free(os, M_KTLS_OCF);
			return (error);
		}
	}

	mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
	tls->ocf_session = os;
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 ||
	    tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) {
		if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
			os->sw = &ktls_ocf_tls13_aead_sw;
		else
			os->sw = &ktls_ocf_tls12_aead_sw;
	} else {
		os->sw = &ktls_ocf_tls_cbc_sw;
		if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) {
			os->implicit_iv = true;
			memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
#ifdef INVARIANTS
			os->next_seqno = tls->next_seqno;
#endif
		}
	}

	/*
	 * AES-CBC is always synchronous currently.  Asynchronous
	 * operation would require multiple callbacks and an additional
	 * iovec array in ktls_ocf_encrypt_state.
	 */
	tls->sync_dispatch = CRYPTO_SESS_SYNC(os->sid) ||
	    tls->params.cipher_algorithm == CRYPTO_AES_CBC;
	return (0);
}
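
/*
 * Entry points called from the generic KTLS code.  Each simply
 * indirects through the method table selected in ktls_ocf_try().
 */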
int
ktls_ocf_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	return (tls->ocf_session->sw->encrypt(state, tls, m, outiov,
	    outiovcnt));
}

int
ktls_ocf_decrypt(struct ktls_session *tls, const struct tls_record_layer *hdr,
    struct mbuf *m, uint64_t seqno, int *trailer_len)
{
	return (tls->ocf_session->sw->decrypt(tls, hdr, m, seqno, trailer_len));
}

int
ktls_ocf_recrypt(struct ktls_session *tls, const struct tls_record_layer *hdr,
    struct mbuf *m, uint64_t seqno)
{
	return (tls->ocf_session->sw->recrypt(tls, hdr, m, seqno));
}

bool
ktls_ocf_recrypt_supported(struct ktls_session *tls)
{
	return (tls->ocf_session->sw->recrypt != NULL &&
	    tls->ocf_session->recrypt_sid != NULL);
}