/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <netinet/in.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/ktls.h>

struct ktls_ocf_sw {
        /* Encrypt a single outbound TLS record. */
        int (*encrypt)(struct ktls_ocf_encrypt_state *state,
            struct ktls_session *tls, struct mbuf *m,
            struct iovec *outiov, int outiovcnt);

        /* Re-encrypt a received TLS record that is partially decrypted. */
        int (*recrypt)(struct ktls_session *tls,
            const struct tls_record_layer *hdr, struct mbuf *m,
            uint64_t seqno);

        /* Decrypt a received TLS record. */
        int (*decrypt)(struct ktls_session *tls,
            const struct tls_record_layer *hdr, struct mbuf *m,
            uint64_t seqno, int *trailer_len);
};
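
/*
 * Each supported mode provides an instance of this switch: see
 * ktls_ocf_tls_cbc_sw, ktls_ocf_tls12_aead_sw, and ktls_ocf_tls13_aead_sw
 * below.  ktls_ocf_try() selects the instance matching the session's
 * cipher suite and TLS version.
 */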

struct ktls_ocf_session {
        const struct ktls_ocf_sw *sw;
        crypto_session_t sid;
        crypto_session_t mac_sid;
        crypto_session_t recrypt_sid;
        struct mtx lock;
        int mac_len;
        bool implicit_iv;

        /* Only used for TLS 1.0 with the implicit IV. */
#ifdef INVARIANTS
        bool in_progress;
        uint64_t next_seqno;
#endif
        char iv[AES_BLOCK_LEN];
};

struct ocf_operation {
        struct ktls_ocf_session *os;
        bool done;
};

static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");

SYSCTL_DECL(_kern_ipc_tls);
SYSCTL_DECL(_kern_ipc_tls_stats);

static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload via OCF stats");

static COUNTER_U64_DEFINE_EARLY(ocf_tls10_cbc_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_encrypts,
    CTLFLAG_RD, &ocf_tls10_cbc_encrypts,
    "Total number of OCF TLS 1.0 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_decrypts,
    CTLFLAG_RD, &ocf_tls11_cbc_decrypts,
    "Total number of OCF TLS 1.1/1.2 CBC decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_encrypts,
    CTLFLAG_RD, &ocf_tls11_cbc_encrypts,
    "Total number of OCF TLS 1.1/1.2 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_decrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_decrypts,
    "Total number of OCF TLS 1.2 GCM decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_encrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_encrypts,
    "Total number of OCF TLS 1.2 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_recrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_recrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_recrypts,
    "Total number of OCF TLS 1.2 GCM re-encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_decrypts,
    CTLFLAG_RD, &ocf_tls12_chacha20_decrypts,
    "Total number of OCF TLS 1.2 Chacha20-Poly1305 decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_encrypts,
    CTLFLAG_RD, &ocf_tls12_chacha20_encrypts,
    "Total number of OCF TLS 1.2 Chacha20-Poly1305 encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_decrypts,
    CTLFLAG_RD, &ocf_tls13_gcm_decrypts,
    "Total number of OCF TLS 1.3 GCM decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_encrypts,
    CTLFLAG_RD, &ocf_tls13_gcm_encrypts,
    "Total number of OCF TLS 1.3 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_recrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_recrypts,
    CTLFLAG_RD, &ocf_tls13_gcm_recrypts,
    "Total number of OCF TLS 1.3 GCM re-encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_decrypts,
    CTLFLAG_RD, &ocf_tls13_chacha20_decrypts,
    "Total number of OCF TLS 1.3 Chacha20-Poly1305 decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_encrypts,
    CTLFLAG_RD, &ocf_tls13_chacha20_encrypts,
    "Total number of OCF TLS 1.3 Chacha20-Poly1305 encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_inplace);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
    CTLFLAG_RD, &ocf_inplace,
    "Total number of OCF in-place operations");

static COUNTER_U64_DEFINE_EARLY(ocf_separate_output);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
    CTLFLAG_RD, &ocf_separate_output,
    "Total number of OCF operations with a separate output buffer");

static COUNTER_U64_DEFINE_EARLY(ocf_retries);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
    &ocf_retries,
    "Number of OCF encryption operation retries");

static int
ktls_ocf_callback_sync(struct cryptop *crp __unused)
{
        return (0);
}

static int
ktls_ocf_callback_async(struct cryptop *crp)
{
        struct ocf_operation *oo;

        oo = crp->crp_opaque;
        mtx_lock(&oo->os->lock);
        oo->done = true;
        mtx_unlock(&oo->os->lock);
        wakeup(oo);
        return (0);
}

static int
ktls_ocf_dispatch(struct ktls_ocf_session *os, struct cryptop *crp)
{
        struct ocf_operation oo;
        int error;
        bool async;

        oo.os = os;
        oo.done = false;

        crp->crp_opaque = &oo;
        for (;;) {
                async = !CRYPTO_SESS_SYNC(crp->crp_session);
                crp->crp_callback = async ? ktls_ocf_callback_async :
                    ktls_ocf_callback_sync;

                error = crypto_dispatch(crp);
                if (error)
                        break;
                if (async) {
                        mtx_lock(&os->lock);
                        while (!oo.done)
                                mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
                        mtx_unlock(&os->lock);
                }

                if (crp->crp_etype != EAGAIN) {
                        error = crp->crp_etype;
                        break;
                }

                crp->crp_etype = 0;
                crp->crp_flags &= ~CRYPTO_F_DONE;
                oo.done = false;
                counter_u64_add(ocf_retries, 1);
        }
        return (error);
}

static int
ktls_ocf_dispatch_async_cb(struct cryptop *crp)
{
        struct ktls_ocf_encrypt_state *state;
        int error;

        state = crp->crp_opaque;
        if (crp->crp_etype == EAGAIN) {
                crp->crp_etype = 0;
                crp->crp_flags &= ~CRYPTO_F_DONE;
                counter_u64_add(ocf_retries, 1);
                error = crypto_dispatch(crp);
                if (error != 0) {
                        crypto_destroyreq(crp);
                        ktls_encrypt_cb(state, error);
                }
                return (0);
        }

        error = crp->crp_etype;
        crypto_destroyreq(crp);
        ktls_encrypt_cb(state, error);
        return (0);
}

static int
ktls_ocf_dispatch_async(struct ktls_ocf_encrypt_state *state,
    struct cryptop *crp)
{
        int error;

        crp->crp_opaque = state;
        crp->crp_callback = ktls_ocf_dispatch_async_cb;
        error = crypto_dispatch(crp);
        if (error != 0)
                crypto_destroyreq(crp);
        return (error);
}
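
/*
 * CBC cipher suites use MAC-then-encrypt: the MAC is computed over a
 * pseudo-header plus the plaintext, appended to the record, and the
 * result is padded to a multiple of AES_BLOCK_LEN before encryption.
 * TLS padding appends pad + 1 bytes, each holding the value pad.  As
 * an illustrative example (not from the original source): with an
 * HMAC-SHA1 MAC (20 bytes) and 11 bytes of payload, 31 bytes must be
 * padded to 32, so pad = 0 and a single 0x00 byte is appended.
 */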

static int
ktls_ocf_tls_cbc_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
        const struct tls_record_layer *hdr;
        struct uio *uio;
        struct tls_mac_data *ad;
        struct cryptop *crp;
        struct ktls_ocf_session *os;
        struct iovec iov[m->m_epg_npgs + 2];
        u_int pgoff;
        int i, error;
        uint16_t tls_comp_len;
        uint8_t pad;

        MPASS(outiovcnt + 1 <= nitems(iov));

        os = tls->ocf_session;
        hdr = (const struct tls_record_layer *)m->m_epg_hdr;
        crp = &state->crp;
        uio = &state->uio;
        MPASS(tls->sync_dispatch);

#ifdef INVARIANTS
        if (os->implicit_iv) {
                mtx_lock(&os->lock);
                KASSERT(!os->in_progress,
                    ("concurrent implicit IV encryptions"));
                if (os->next_seqno != m->m_epg_seqno) {
                        printf("KTLS CBC: TLS records out of order. "
                            "Expected %ju, got %ju\n",
                            (uintmax_t)os->next_seqno,
                            (uintmax_t)m->m_epg_seqno);
                        mtx_unlock(&os->lock);
                        return (EINVAL);
                }
                os->in_progress = true;
                mtx_unlock(&os->lock);
        }
#endif

        /* Payload length. */
        tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);

        /* Initialize the AAD. */
        ad = &state->mac;
        ad->seq = htobe64(m->m_epg_seqno);
        ad->type = hdr->tls_type;
        ad->tls_vmajor = hdr->tls_vmajor;
        ad->tls_vminor = hdr->tls_vminor;
        ad->tls_length = htons(tls_comp_len);

        /* First, compute the MAC. */
        iov[0].iov_base = ad;
        iov[0].iov_len = sizeof(*ad);
        pgoff = m->m_epg_1st_off;
        for (i = 0; i < m->m_epg_npgs; i++, pgoff = 0) {
                iov[i + 1].iov_base = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] +
                    pgoff);
                iov[i + 1].iov_len = m_epg_pagelen(m, i, pgoff);
        }
        iov[m->m_epg_npgs + 1].iov_base = m->m_epg_trail;
        iov[m->m_epg_npgs + 1].iov_len = os->mac_len;
        uio->uio_iov = iov;
        uio->uio_iovcnt = m->m_epg_npgs + 2;
        uio->uio_offset = 0;
        uio->uio_segflg = UIO_SYSSPACE;
        uio->uio_td = curthread;
        uio->uio_resid = sizeof(*ad) + tls_comp_len + os->mac_len;

        crypto_initreq(crp, os->mac_sid);
        crp->crp_payload_start = 0;
        crp->crp_payload_length = sizeof(*ad) + tls_comp_len;
        crp->crp_digest_start = crp->crp_payload_length;
        crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
        crp->crp_flags = CRYPTO_F_CBIMM;
        crypto_use_uio(crp, uio);
        error = ktls_ocf_dispatch(os, crp);

        crypto_destroyreq(crp);
        if (error) {
#ifdef INVARIANTS
                if (os->implicit_iv) {
                        mtx_lock(&os->lock);
                        os->in_progress = false;
                        mtx_unlock(&os->lock);
                }
#endif
                return (error);
        }

        /* Second, add the padding. */
        pad = m->m_epg_trllen - os->mac_len - 1;
        for (i = 0; i < pad + 1; i++)
                m->m_epg_trail[os->mac_len + i] = pad;
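
        /*
         * Note on the IV selection below: TLS 1.0 chains the CBC IV
         * across records, so each record's IV is the final ciphertext
         * block of the previous record (saved in os->iv after each
         * operation).  TLS 1.1 and 1.2 instead carry an explicit IV
         * immediately after the record header.
         */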

        /* Finally, encrypt the record. */
        crypto_initreq(crp, os->sid);
        crp->crp_payload_start = m->m_epg_hdrlen;
        crp->crp_payload_length = tls_comp_len + m->m_epg_trllen;
        KASSERT(crp->crp_payload_length % AES_BLOCK_LEN == 0,
            ("invalid encryption size"));
        crypto_use_single_mbuf(crp, m);
        crp->crp_op = CRYPTO_OP_ENCRYPT;
        crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
        if (os->implicit_iv)
                memcpy(crp->crp_iv, os->iv, AES_BLOCK_LEN);
        else
                memcpy(crp->crp_iv, hdr + 1, AES_BLOCK_LEN);

        if (outiov != NULL) {
                uio->uio_iov = outiov;
                uio->uio_iovcnt = outiovcnt;
                uio->uio_offset = 0;
                uio->uio_segflg = UIO_SYSSPACE;
                uio->uio_td = curthread;
                uio->uio_resid = crp->crp_payload_length;
                crypto_use_output_uio(crp, uio);
        }

        if (os->implicit_iv)
                counter_u64_add(ocf_tls10_cbc_encrypts, 1);
        else
                counter_u64_add(ocf_tls11_cbc_encrypts, 1);
        if (outiov != NULL)
                counter_u64_add(ocf_separate_output, 1);
        else
                counter_u64_add(ocf_inplace, 1);
        error = ktls_ocf_dispatch(os, crp);

        crypto_destroyreq(crp);

        if (os->implicit_iv) {
                KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
                    ("trailer too short to read IV"));
                memcpy(os->iv, m->m_epg_trail + m->m_epg_trllen -
                    AES_BLOCK_LEN, AES_BLOCK_LEN);
#ifdef INVARIANTS
                mtx_lock(&os->lock);
                os->next_seqno = m->m_epg_seqno + 1;
                os->in_progress = false;
                mtx_unlock(&os->lock);
#endif
        }
        return (error);
}

static int
check_padding(void *arg, void *data, u_int len)
{
        uint8_t pad = *(uint8_t *)arg;
        const char *cp = data;

        while (len > 0) {
                if (*cp != pad)
                        return (EBADMSG);
                cp++;
                len--;
        }
        return (0);
}
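
/*
 * The CBC receive path works in three steps: decrypt the record in
 * place, validate the padding bytes (each of the pad + 1 trailing
 * bytes must equal pad, checked by check_padding() above via
 * m_apply()), and finally recompute and verify the MAC over the
 * pseudo-header and the recovered plaintext.
 */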

static int
ktls_ocf_tls_cbc_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
        struct tls_mac_data ad;
        struct cryptop crp;
        struct uio uio;
        struct ktls_ocf_session *os;
        struct iovec *iov;
        struct mbuf *n;
        u_int iovcnt;
        int i, error, skip;
        uint16_t tls_len, tls_comp_len;
        uint8_t pad;

        os = tls->ocf_session;

        /*
         * Ensure record is a multiple of the cipher block size and
         * contains at least an explicit IV, MAC, and at least one
         * padding byte.
         */
        tls_len = ntohs(hdr->tls_length);
        if (tls_len % AES_BLOCK_LEN != 0 ||
            tls_len < AES_BLOCK_LEN + roundup2(os->mac_len + 1,
            AES_BLOCK_LEN))
                return (EMSGSIZE);

        /* First, decrypt the record. */
        crypto_initreq(&crp, os->sid);
        crp.crp_iv_start = sizeof(*hdr);
        crp.crp_payload_start = tls->params.tls_hlen;
        crp.crp_payload_length = tls_len - AES_BLOCK_LEN;
        crypto_use_mbuf(&crp, m);
        crp.crp_op = CRYPTO_OP_DECRYPT;
        crp.crp_flags = CRYPTO_F_CBIMM;

        counter_u64_add(ocf_tls11_cbc_decrypts, 1);

        error = ktls_ocf_dispatch(os, &crp);
        crypto_destroyreq(&crp);
        if (error)
                return (error);

        /* Verify the padding. */
        m_copydata(m, sizeof(*hdr) + tls_len - 1, 1, &pad);
        *trailer_len = os->mac_len + pad + 1;
        if (AES_BLOCK_LEN + *trailer_len > tls_len)
                return (EBADMSG);
        error = m_apply(m, sizeof(*hdr) + tls_len - (pad + 1), pad + 1,
            check_padding, &pad);
        if (error)
                return (error);

        /* Verify the MAC. */
        tls_comp_len = tls_len - (AES_BLOCK_LEN + *trailer_len);
        memset(&uio, 0, sizeof(uio));

        /*
         * Allocate and populate the iov.  Have to skip over the TLS
         * header in 'm' as it is not part of the MAC input.
         */
        iovcnt = 1;
        for (n = m; n != NULL; n = n->m_next)
                iovcnt++;
        iov = malloc(iovcnt * sizeof(*iov), M_KTLS_OCF, M_WAITOK);
        iov[0].iov_base = &ad;
        iov[0].iov_len = sizeof(ad);
        skip = sizeof(*hdr) + AES_BLOCK_LEN;
        for (i = 1, n = m; n != NULL; i++, n = n->m_next) {
                if (n->m_len < skip) {
                        skip -= n->m_len;
                        continue;
                }
                iov[i].iov_base = mtod(n, char *) + skip;
                iov[i].iov_len = n->m_len - skip;
                skip = 0;
        }
        uio.uio_iov = iov;
        uio.uio_iovcnt = i;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_td = curthread;
        uio.uio_resid = sizeof(ad) + tls_len - AES_BLOCK_LEN;

        /* Initialize the AAD. */
        ad.seq = htobe64(seqno);
        ad.type = hdr->tls_type;
        ad.tls_vmajor = hdr->tls_vmajor;
        ad.tls_vminor = hdr->tls_vminor;
        ad.tls_length = htons(tls_comp_len);

        crypto_initreq(&crp, os->mac_sid);
        crp.crp_payload_start = 0;
        crp.crp_payload_length = sizeof(ad) + tls_comp_len;
        crp.crp_digest_start = crp.crp_payload_length;
        crp.crp_op = CRYPTO_OP_VERIFY_DIGEST;
        crp.crp_flags = CRYPTO_F_CBIMM;
        crypto_use_uio(&crp, &uio);
        error = ktls_ocf_dispatch(os, &crp);

        crypto_destroyreq(&crp);
        free(iov, M_KTLS_OCF);
        return (error);
}

static const struct ktls_ocf_sw ktls_ocf_tls_cbc_sw = {
        .encrypt = ktls_ocf_tls_cbc_encrypt,
        .decrypt = ktls_ocf_tls_cbc_decrypt
};

static int
ktls_ocf_tls12_aead_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
        const struct tls_record_layer *hdr;
        struct uio *uio;
        struct tls_aead_data *ad;
        struct cryptop *crp;
        struct ktls_ocf_session *os;
        int error;
        uint16_t tls_comp_len;

        os = tls->ocf_session;
        hdr = (const struct tls_record_layer *)m->m_epg_hdr;
        crp = &state->crp;
        uio = &state->uio;

        crypto_initreq(crp, os->sid);
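
        /*
         * Nonce construction differs by cipher (a summary of the code
         * below): AES-GCM in TLS 1.2 uses a 4-byte implicit salt from
         * the key block followed by the record's 8-byte explicit nonce
         * (RFC 5288), while Chacha20-Poly1305 XORs the 64-bit sequence
         * number into the low 8 bytes of a 12-byte implicit IV, the
         * same construction TLS 1.3 uses for all AEAD ciphers.
         */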

        /* Setup the IV. */
        if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
                memcpy(crp->crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
                memcpy(crp->crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
                    sizeof(uint64_t));
        } else {
                /*
                 * Chacha20-Poly1305 constructs the IV for TLS 1.2
                 * identically to constructing the IV for AEAD in TLS
                 * 1.3.
                 */
                memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
                *(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);
        }

        /* Setup the AAD. */
        ad = &state->aead;
        tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
        ad->seq = htobe64(m->m_epg_seqno);
        ad->type = hdr->tls_type;
        ad->tls_vmajor = hdr->tls_vmajor;
        ad->tls_vminor = hdr->tls_vminor;
        ad->tls_length = htons(tls_comp_len);
        crp->crp_aad = ad;
        crp->crp_aad_length = sizeof(*ad);

        /* Set fields for input payload. */
        crypto_use_single_mbuf(crp, m);
        crp->crp_payload_start = m->m_epg_hdrlen;
        crp->crp_payload_length = tls_comp_len;

        if (outiov != NULL) {
                crp->crp_digest_start = crp->crp_payload_length;

                uio->uio_iov = outiov;
                uio->uio_iovcnt = outiovcnt;
                uio->uio_offset = 0;
                uio->uio_segflg = UIO_SYSSPACE;
                uio->uio_td = curthread;
                uio->uio_resid = crp->crp_payload_length +
                    tls->params.tls_tlen;
                crypto_use_output_uio(crp, uio);
        } else
                crp->crp_digest_start = crp->crp_payload_start +
                    crp->crp_payload_length;

        crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
        crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
        if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
                counter_u64_add(ocf_tls12_gcm_encrypts, 1);
        else
                counter_u64_add(ocf_tls12_chacha20_encrypts, 1);
        if (outiov != NULL)
                counter_u64_add(ocf_separate_output, 1);
        else
                counter_u64_add(ocf_inplace, 1);
        if (tls->sync_dispatch) {
                error = ktls_ocf_dispatch(os, crp);
                crypto_destroyreq(crp);
        } else
                error = ktls_ocf_dispatch_async(state, crp);
        return (error);
}

static int
ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
        struct tls_aead_data ad;
        struct cryptop crp;
        struct ktls_ocf_session *os;
        int error;
        uint16_t tls_comp_len, tls_len;

        os = tls->ocf_session;

        /* Ensure record contains at least an explicit IV and tag. */
        tls_len = ntohs(hdr->tls_length);
        if (tls_len + sizeof(*hdr) < tls->params.tls_hlen +
            tls->params.tls_tlen)
                return (EMSGSIZE);

        crypto_initreq(&crp, os->sid);

        /* Setup the IV. */
        if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
                memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
                memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
                    sizeof(uint64_t));
        } else {
                /*
                 * Chacha20-Poly1305 constructs the IV for TLS 1.2
                 * identically to constructing the IV for AEAD in TLS
                 * 1.3.
                 */
                memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
                *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
        }

        /* Setup the AAD. */
        if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
                tls_comp_len = tls_len -
                    (AES_GMAC_HASH_LEN + sizeof(uint64_t));
        else
                tls_comp_len = tls_len - POLY1305_HASH_LEN;
        ad.seq = htobe64(seqno);
        ad.type = hdr->tls_type;
        ad.tls_vmajor = hdr->tls_vmajor;
        ad.tls_vminor = hdr->tls_vminor;
        ad.tls_length = htons(tls_comp_len);
        crp.crp_aad = &ad;
        crp.crp_aad_length = sizeof(ad);

        crp.crp_payload_start = tls->params.tls_hlen;
        crp.crp_payload_length = tls_comp_len;
        crp.crp_digest_start = crp.crp_payload_start +
            crp.crp_payload_length;

        crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
        crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
        crypto_use_mbuf(&crp, m);

        if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
                counter_u64_add(ocf_tls12_gcm_decrypts, 1);
        else
                counter_u64_add(ocf_tls12_chacha20_decrypts, 1);
        error = ktls_ocf_dispatch(os, &crp);

        crypto_destroyreq(&crp);
        *trailer_len = tls->params.tls_tlen;
        return (error);
}
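
/*
 * The recrypt path handles records that arrive partially decrypted
 * (mbufs marked M_DECRYPTED).  Rather than decrypting the remaining
 * ciphertext, the plaintext regions are re-encrypted so the record is
 * ciphertext throughout and can then go through the normal AEAD
 * decrypt-and-verify path.  This works because GCM's payload is
 * encrypted with AES in counter mode: an AES-CTR session keyed with
 * the same key and started at counter value 2 (the value GCM uses for
 * the first payload block) regenerates the identical keystream.
 */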

/*
 * Reconstruct encrypted mbuf data in input buffer.
 */
static void
ktls_ocf_recrypt_fixup(struct mbuf *m, u_int skip, u_int len, char *buf)
{
        const char *src = buf;
        u_int todo;

        while (skip >= m->m_len) {
                skip -= m->m_len;
                m = m->m_next;
        }

        while (len > 0) {
                todo = m->m_len - skip;
                if (todo > len)
                        todo = len;

                if (m->m_flags & M_DECRYPTED)
                        memcpy(mtod(m, char *) + skip, src, todo);
                src += todo;
                len -= todo;
                skip = 0;
                m = m->m_next;
        }
}

static int
ktls_ocf_tls12_aead_recrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m,
    uint64_t seqno)
{
        struct cryptop crp;
        struct ktls_ocf_session *os;
        char *buf;
        u_int payload_len;
        int error;
        uint16_t tls_len;

        os = tls->ocf_session;

        /* Ensure record contains at least an explicit IV and tag. */
        tls_len = ntohs(hdr->tls_length);
        if (tls_len < sizeof(uint64_t) + AES_GMAC_HASH_LEN)
                return (EMSGSIZE);

        crypto_initreq(&crp, os->recrypt_sid);

        KASSERT(tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16,
            ("%s: only AES-GCM is supported", __func__));

        /* Setup the IV. */
        memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
        memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
        be32enc(crp.crp_iv + AES_GCM_IV_LEN, 2);

        payload_len = tls_len - (AES_GMAC_HASH_LEN + sizeof(uint64_t));
        crp.crp_op = CRYPTO_OP_ENCRYPT;
        crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
        crypto_use_mbuf(&crp, m);
        crp.crp_payload_start = tls->params.tls_hlen;
        crp.crp_payload_length = payload_len;

        buf = malloc(payload_len, M_KTLS_OCF, M_WAITOK);
        crypto_use_output_buf(&crp, buf, payload_len);

        counter_u64_add(ocf_tls12_gcm_recrypts, 1);
        error = ktls_ocf_dispatch(os, &crp);

        crypto_destroyreq(&crp);

        if (error == 0)
                ktls_ocf_recrypt_fixup(m, tls->params.tls_hlen, payload_len,
                    buf);

        free(buf, M_KTLS_OCF);
        return (error);
}

static const struct ktls_ocf_sw ktls_ocf_tls12_aead_sw = {
        .encrypt = ktls_ocf_tls12_aead_encrypt,
        .recrypt = ktls_ocf_tls12_aead_recrypt,
        .decrypt = ktls_ocf_tls12_aead_decrypt,
};

static int
ktls_ocf_tls13_aead_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
        const struct tls_record_layer *hdr;
        struct uio *uio;
        struct tls_aead_data_13 *ad;
        struct cryptop *crp;
        struct ktls_ocf_session *os;
        int error;

        os = tls->ocf_session;
        hdr = (const struct tls_record_layer *)m->m_epg_hdr;
        crp = &state->crp;
        uio = &state->uio;

        crypto_initreq(crp, os->sid);

        /* Setup the nonce. */
        memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
        *(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);

        /* Setup the AAD. */
        ad = &state->aead13;
        ad->type = hdr->tls_type;
        ad->tls_vmajor = hdr->tls_vmajor;
        ad->tls_vminor = hdr->tls_vminor;
        ad->tls_length = hdr->tls_length;
        crp->crp_aad = ad;
        crp->crp_aad_length = sizeof(*ad);

        /* Set fields for input payload. */
        crypto_use_single_mbuf(crp, m);
        crp->crp_payload_start = m->m_epg_hdrlen;
        crp->crp_payload_length = m->m_len -
            (m->m_epg_hdrlen + m->m_epg_trllen);
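
        /*
         * TLS 1.3 hides the real record type: the TLSInnerPlaintext is
         * the application payload followed by the one-byte content
         * type, and the outer header always claims application_data.
         * Appending the record type here is why the payload length
         * grows by one below.
         */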

        /* Store the record type as the first byte of the trailer. */
        m->m_epg_trail[0] = m->m_epg_record_type;
        crp->crp_payload_length++;

        if (outiov != NULL) {
                crp->crp_digest_start = crp->crp_payload_length;

                uio->uio_iov = outiov;
                uio->uio_iovcnt = outiovcnt;
                uio->uio_offset = 0;
                uio->uio_segflg = UIO_SYSSPACE;
                uio->uio_td = curthread;
                uio->uio_resid = m->m_len - m->m_epg_hdrlen;
                crypto_use_output_uio(crp, uio);
        } else
                crp->crp_digest_start = crp->crp_payload_start +
                    crp->crp_payload_length;

        crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
        crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;

        if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
                counter_u64_add(ocf_tls13_gcm_encrypts, 1);
        else
                counter_u64_add(ocf_tls13_chacha20_encrypts, 1);
        if (outiov != NULL)
                counter_u64_add(ocf_separate_output, 1);
        else
                counter_u64_add(ocf_inplace, 1);
        if (tls->sync_dispatch) {
                error = ktls_ocf_dispatch(os, crp);
                crypto_destroyreq(crp);
        } else
                error = ktls_ocf_dispatch_async(state, crp);
        return (error);
}

static int
ktls_ocf_tls13_aead_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
        struct tls_aead_data_13 ad;
        struct cryptop crp;
        struct ktls_ocf_session *os;
        int error;
        u_int tag_len;
        uint16_t tls_len;

        os = tls->ocf_session;

        tag_len = tls->params.tls_tlen - 1;

        /* Payload must contain at least one byte for the record type. */
        tls_len = ntohs(hdr->tls_length);
        if (tls_len < tag_len + 1)
                return (EMSGSIZE);

        crypto_initreq(&crp, os->sid);

        /* Setup the nonce. */
        memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
        *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);

        /* Setup the AAD. */
        ad.type = hdr->tls_type;
        ad.tls_vmajor = hdr->tls_vmajor;
        ad.tls_vminor = hdr->tls_vminor;
        ad.tls_length = hdr->tls_length;
        crp.crp_aad = &ad;
        crp.crp_aad_length = sizeof(ad);

        crp.crp_payload_start = tls->params.tls_hlen;
        crp.crp_payload_length = tls_len - tag_len;
        crp.crp_digest_start = crp.crp_payload_start +
            crp.crp_payload_length;

        crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
        crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
        crypto_use_mbuf(&crp, m);

        if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
                counter_u64_add(ocf_tls13_gcm_decrypts, 1);
        else
                counter_u64_add(ocf_tls13_chacha20_decrypts, 1);
        error = ktls_ocf_dispatch(os, &crp);

        crypto_destroyreq(&crp);
        *trailer_len = tag_len;
        return (error);
}

static int
ktls_ocf_tls13_aead_recrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m,
    uint64_t seqno)
{
        struct cryptop crp;
        struct ktls_ocf_session *os;
        char *buf;
        u_int payload_len;
        int error;
        uint16_t tls_len;

        os = tls->ocf_session;

        /* Payload must contain at least one byte for the record type. */
        tls_len = ntohs(hdr->tls_length);
        if (tls_len < AES_GMAC_HASH_LEN + 1)
                return (EMSGSIZE);

        crypto_initreq(&crp, os->recrypt_sid);

        KASSERT(tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16,
            ("%s: only AES-GCM is supported", __func__));
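
        /*
         * As in the TLS 1.2 recrypt path, the CTR keystream must line
         * up with GCM's: the nonce is derived exactly as in the normal
         * TLS 1.3 path and the 32-bit counter is preset to 2, the
         * first counter value GCM uses for payload blocks.
         */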

        /* Setup the IV. */
        memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
        *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
        be32enc(crp.crp_iv + 12, 2);

        payload_len = tls_len - AES_GMAC_HASH_LEN;
        crp.crp_op = CRYPTO_OP_ENCRYPT;
        crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
        crypto_use_mbuf(&crp, m);
        crp.crp_payload_start = tls->params.tls_hlen;
        crp.crp_payload_length = payload_len;

        buf = malloc(payload_len, M_KTLS_OCF, M_WAITOK);
        crypto_use_output_buf(&crp, buf, payload_len);

        counter_u64_add(ocf_tls13_gcm_recrypts, 1);
        error = ktls_ocf_dispatch(os, &crp);

        crypto_destroyreq(&crp);

        if (error == 0)
                ktls_ocf_recrypt_fixup(m, tls->params.tls_hlen, payload_len,
                    buf);

        free(buf, M_KTLS_OCF);
        return (error);
}

static const struct ktls_ocf_sw ktls_ocf_tls13_aead_sw = {
        .encrypt = ktls_ocf_tls13_aead_encrypt,
        .recrypt = ktls_ocf_tls13_aead_recrypt,
        .decrypt = ktls_ocf_tls13_aead_decrypt,
};
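
/*
 * A ktls_ocf_session owns up to three OCF sessions: the main cipher
 * session (sid), a separate digest session (mac_sid) for the CBC
 * MAC-then-encrypt suites, and an AES-CTR session (recrypt_sid) used
 * to re-encrypt partially decrypted AES-GCM records.  They are
 * created in ktls_ocf_try() below and released here.
 */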

void
ktls_ocf_free(struct ktls_session *tls)
{
        struct ktls_ocf_session *os;

        os = tls->ocf_session;
        crypto_freesession(os->sid);
        crypto_freesession(os->mac_sid);
        crypto_freesession(os->recrypt_sid);
        mtx_destroy(&os->lock);
        zfree(os, M_KTLS_OCF);
}

int
ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
{
        struct crypto_session_params csp, mac_csp, recrypt_csp;
        struct ktls_ocf_session *os;
        int error, mac_len;

        memset(&csp, 0, sizeof(csp));
        memset(&mac_csp, 0, sizeof(mac_csp));
        mac_csp.csp_mode = CSP_MODE_NONE;
        mac_len = 0;
        memset(&recrypt_csp, 0, sizeof(recrypt_csp));
        recrypt_csp.csp_mode = CSP_MODE_NONE;

        switch (tls->params.cipher_algorithm) {
        case CRYPTO_AES_NIST_GCM_16:
                switch (tls->params.cipher_key_len) {
                case 128 / 8:
                case 256 / 8:
                        break;
                default:
                        return (EINVAL);
                }

                /* Only TLS 1.2 and 1.3 are supported. */
                if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
                    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
                    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
                        return (EPROTONOSUPPORT);

                csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
                csp.csp_mode = CSP_MODE_AEAD;
                csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
                csp.csp_cipher_key = tls->params.cipher_key;
                csp.csp_cipher_klen = tls->params.cipher_key_len;
                csp.csp_ivlen = AES_GCM_IV_LEN;

                recrypt_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
                recrypt_csp.csp_mode = CSP_MODE_CIPHER;
                recrypt_csp.csp_cipher_alg = CRYPTO_AES_ICM;
                recrypt_csp.csp_cipher_key = tls->params.cipher_key;
                recrypt_csp.csp_cipher_klen = tls->params.cipher_key_len;
                recrypt_csp.csp_ivlen = AES_BLOCK_LEN;
                break;
        case CRYPTO_AES_CBC:
                switch (tls->params.cipher_key_len) {
                case 128 / 8:
                case 256 / 8:
                        break;
                default:
                        return (EINVAL);
                }

                switch (tls->params.auth_algorithm) {
                case CRYPTO_SHA1_HMAC:
                        mac_len = SHA1_HASH_LEN;
                        break;
                case CRYPTO_SHA2_256_HMAC:
                        mac_len = SHA2_256_HASH_LEN;
                        break;
                case CRYPTO_SHA2_384_HMAC:
                        mac_len = SHA2_384_HASH_LEN;
                        break;
                default:
                        return (EINVAL);
                }

                /* Only TLS 1.0-1.2 are supported. */
                if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
                    tls->params.tls_vminor < TLS_MINOR_VER_ZERO ||
                    tls->params.tls_vminor > TLS_MINOR_VER_TWO)
                        return (EPROTONOSUPPORT);

                /* AES-CBC is not supported for receive for TLS 1.0. */
                if (direction == KTLS_RX &&
                    tls->params.tls_vminor == TLS_MINOR_VER_ZERO)
                        return (EPROTONOSUPPORT);

                csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
                csp.csp_mode = CSP_MODE_CIPHER;
                csp.csp_cipher_alg = CRYPTO_AES_CBC;
                csp.csp_cipher_key = tls->params.cipher_key;
                csp.csp_cipher_klen = tls->params.cipher_key_len;
                csp.csp_ivlen = AES_BLOCK_LEN;

                mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
                mac_csp.csp_mode = CSP_MODE_DIGEST;
                mac_csp.csp_auth_alg = tls->params.auth_algorithm;
                mac_csp.csp_auth_key = tls->params.auth_key;
                mac_csp.csp_auth_klen = tls->params.auth_key_len;
                break;
        case CRYPTO_CHACHA20_POLY1305:
                switch (tls->params.cipher_key_len) {
                case 256 / 8:
                        break;
                default:
                        return (EINVAL);
                }

                /* Only TLS 1.2 and 1.3 are supported. */
                if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
                    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
                    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
                        return (EPROTONOSUPPORT);

                csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
                csp.csp_mode = CSP_MODE_AEAD;
                csp.csp_cipher_alg = CRYPTO_CHACHA20_POLY1305;
                csp.csp_cipher_key = tls->params.cipher_key;
                csp.csp_cipher_klen = tls->params.cipher_key_len;
                csp.csp_ivlen = CHACHA20_POLY1305_IV_LEN;
                break;
        default:
                return (EPROTONOSUPPORT);
        }

        os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO);
        if (os == NULL)
                return (ENOMEM);

        error = crypto_newsession(&os->sid, &csp,
            CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
        if (error) {
                free(os, M_KTLS_OCF);
                return (error);
        }

        if (mac_csp.csp_mode != CSP_MODE_NONE) {
                error = crypto_newsession(&os->mac_sid, &mac_csp,
                    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
                if (error) {
                        crypto_freesession(os->sid);
                        free(os, M_KTLS_OCF);
                        return (error);
                }
                os->mac_len = mac_len;
        }

        if (recrypt_csp.csp_mode != CSP_MODE_NONE) {
                error = crypto_newsession(&os->recrypt_sid, &recrypt_csp,
                    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
                if (error) {
                        crypto_freesession(os->sid);
                        free(os, M_KTLS_OCF);
                        return (error);
                }
        }

        mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
        tls->ocf_session = os;
        if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 ||
            tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) {
                if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
                        os->sw = &ktls_ocf_tls13_aead_sw;
                else
                        os->sw = &ktls_ocf_tls12_aead_sw;
        } else {
                os->sw = &ktls_ocf_tls_cbc_sw;
                if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) {
                        os->implicit_iv = true;
                        memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
#ifdef INVARIANTS
                        os->next_seqno = tls->next_seqno;
#endif
                }
        }

        /*
         * AES-CBC is always synchronous currently.  Asynchronous
         * operation would require multiple callbacks and an additional
         * iovec array in ktls_ocf_encrypt_state.
         */
        tls->sync_dispatch = CRYPTO_SESS_SYNC(os->sid) ||
            tls->params.cipher_algorithm == CRYPTO_AES_CBC;
        return (0);
}

int
ktls_ocf_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
        return (tls->ocf_session->sw->encrypt(state, tls, m, outiov,
            outiovcnt));
}

int
ktls_ocf_decrypt(struct ktls_session *tls, const struct tls_record_layer *hdr,
    struct mbuf *m, uint64_t seqno, int *trailer_len)
{
        return (tls->ocf_session->sw->decrypt(tls, hdr, m, seqno,
            trailer_len));
}

int
ktls_ocf_recrypt(struct ktls_session *tls, const struct tls_record_layer *hdr,
    struct mbuf *m, uint64_t seqno)
{
        return (tls->ocf_session->sw->recrypt(tls, hdr, m, seqno));
}

bool
ktls_ocf_recrypt_supported(struct ktls_session *tls)
{
        return (tls->ocf_session->sw->recrypt != NULL &&
            tls->ocf_session->recrypt_sid != NULL);
}