/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Netflix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/ktls.h>

/*
 * Per-TLS-session state for the OCF (opencrypto framework) KTLS backend.
 * One of these hangs off of each struct ktls_session via tls->ocf_session.
 */
struct ktls_ocf_session {
	crypto_session_t sid;		/* cipher (or AEAD) OCF session */
	crypto_session_t mac_sid;	/* separate HMAC session; only set
					   for AES-CBC MtE cipher suites */
	struct mtx lock;		/* protects 'done' wakeups and the
					   implicit-IV bookkeeping below */
	int mac_len;			/* digest length of mac_sid, or 0 */
	bool implicit_iv;		/* TLS 1.0 CBC: IV chained from the
					   previous record */

	/* Only used for TLS 1.0 with the implicit IV. */
#ifdef INVARIANTS
	bool in_progress;		/* an encryption is outstanding */
	uint64_t next_seqno;		/* expected next record sequence no. */
#endif
	char iv[AES_BLOCK_LEN];		/* chained implicit IV */
};

/*
 * Completion tracker for a single synchronous-style dispatch; the async
 * callback flips 'done' and wakes the dispatching thread.
 */
struct ocf_operation {
	struct ktls_ocf_session *os;
	bool done;
};

static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");

SYSCTL_DECL(_kern_ipc_tls);
SYSCTL_DECL(_kern_ipc_tls_stats);

static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload via OCF stats");

/* Per-ciphersuite operation counters, exported under kern.ipc.tls.stats.ocf. */
static COUNTER_U64_DEFINE_EARLY(ocf_tls10_cbc_crypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_crypts,
    CTLFLAG_RD, &ocf_tls10_cbc_crypts,
    "Total number of OCF TLS 1.0 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_crypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_crypts,
    CTLFLAG_RD, &ocf_tls11_cbc_crypts,
    "Total number of OCF TLS 1.1/1.2 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_crypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_crypts,
    CTLFLAG_RD, &ocf_tls12_gcm_crypts,
    "Total number of OCF TLS 1.2 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_crypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_crypts, 97 CTLFLAG_RD, &ocf_tls12_chacha20_crypts, 98 "Total number of OCF TLS 1.2 Chacha20-Poly1305 encryption operations"); 99 100 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_crypts); 101 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_crypts, 102 CTLFLAG_RD, &ocf_tls13_gcm_crypts, 103 "Total number of OCF TLS 1.3 GCM encryption operations"); 104 105 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_crypts); 106 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_crypts, 107 CTLFLAG_RD, &ocf_tls13_chacha20_crypts, 108 "Total number of OCF TLS 1.3 Chacha20-Poly1305 encryption operations"); 109 110 static COUNTER_U64_DEFINE_EARLY(ocf_inplace); 111 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace, 112 CTLFLAG_RD, &ocf_inplace, 113 "Total number of OCF in-place operations"); 114 115 static COUNTER_U64_DEFINE_EARLY(ocf_separate_output); 116 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output, 117 CTLFLAG_RD, &ocf_separate_output, 118 "Total number of OCF operations with a separate output buffer"); 119 120 static COUNTER_U64_DEFINE_EARLY(ocf_retries); 121 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD, 122 &ocf_retries, 123 "Number of OCF encryption operation retries"); 124 125 static int 126 ktls_ocf_callback_sync(struct cryptop *crp __unused) 127 { 128 return (0); 129 } 130 131 static int 132 ktls_ocf_callback_async(struct cryptop *crp) 133 { 134 struct ocf_operation *oo; 135 136 oo = crp->crp_opaque; 137 mtx_lock(&oo->os->lock); 138 oo->done = true; 139 mtx_unlock(&oo->os->lock); 140 wakeup(oo); 141 return (0); 142 } 143 144 static int 145 ktls_ocf_dispatch(struct ktls_ocf_session *os, struct cryptop *crp) 146 { 147 struct ocf_operation oo; 148 int error; 149 bool async; 150 151 oo.os = os; 152 oo.done = false; 153 154 crp->crp_opaque = &oo; 155 for (;;) { 156 async = !CRYPTO_SESS_SYNC(crp->crp_session); 
157 crp->crp_callback = async ? ktls_ocf_callback_async : 158 ktls_ocf_callback_sync; 159 160 error = crypto_dispatch(crp); 161 if (error) 162 break; 163 if (async) { 164 mtx_lock(&os->lock); 165 while (!oo.done) 166 mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0); 167 mtx_unlock(&os->lock); 168 } 169 170 if (crp->crp_etype != EAGAIN) { 171 error = crp->crp_etype; 172 break; 173 } 174 175 crp->crp_etype = 0; 176 crp->crp_flags &= ~CRYPTO_F_DONE; 177 oo.done = false; 178 counter_u64_add(ocf_retries, 1); 179 } 180 return (error); 181 } 182 183 static int 184 ktls_ocf_dispatch_async_cb(struct cryptop *crp) 185 { 186 struct ktls_ocf_encrypt_state *state; 187 int error; 188 189 state = crp->crp_opaque; 190 if (crp->crp_etype == EAGAIN) { 191 crp->crp_etype = 0; 192 crp->crp_flags &= ~CRYPTO_F_DONE; 193 counter_u64_add(ocf_retries, 1); 194 error = crypto_dispatch(crp); 195 if (error != 0) { 196 crypto_destroyreq(crp); 197 ktls_encrypt_cb(state, error); 198 } 199 return (0); 200 } 201 202 error = crp->crp_etype; 203 crypto_destroyreq(crp); 204 ktls_encrypt_cb(state, error); 205 return (0); 206 } 207 208 static int 209 ktls_ocf_dispatch_async(struct ktls_ocf_encrypt_state *state, 210 struct cryptop *crp) 211 { 212 int error; 213 214 crp->crp_opaque = state; 215 crp->crp_callback = ktls_ocf_dispatch_async_cb; 216 error = crypto_dispatch(crp); 217 if (error != 0) 218 crypto_destroyreq(crp); 219 return (error); 220 } 221 222 static int 223 ktls_ocf_tls_cbc_encrypt(struct ktls_ocf_encrypt_state *state, 224 struct ktls_session *tls, struct mbuf *m, struct iovec *outiov, 225 int outiovcnt) 226 { 227 const struct tls_record_layer *hdr; 228 struct uio *uio; 229 struct tls_mac_data *ad; 230 struct cryptop *crp; 231 struct ktls_ocf_session *os; 232 struct iovec iov[m->m_epg_npgs + 2]; 233 u_int pgoff; 234 int i, error; 235 uint16_t tls_comp_len; 236 uint8_t pad; 237 238 MPASS(outiovcnt + 1 <= nitems(iov)); 239 240 os = tls->ocf_session; 241 hdr = (const struct tls_record_layer 
*)m->m_epg_hdr; 242 crp = &state->crp; 243 uio = &state->uio; 244 MPASS(tls->sync_dispatch); 245 246 #ifdef INVARIANTS 247 if (os->implicit_iv) { 248 mtx_lock(&os->lock); 249 KASSERT(!os->in_progress, 250 ("concurrent implicit IV encryptions")); 251 if (os->next_seqno != m->m_epg_seqno) { 252 printf("KTLS CBC: TLS records out of order. " 253 "Expected %ju, got %ju\n", 254 (uintmax_t)os->next_seqno, 255 (uintmax_t)m->m_epg_seqno); 256 mtx_unlock(&os->lock); 257 return (EINVAL); 258 } 259 os->in_progress = true; 260 mtx_unlock(&os->lock); 261 } 262 #endif 263 264 /* Payload length. */ 265 tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen); 266 267 /* Initialize the AAD. */ 268 ad = &state->mac; 269 ad->seq = htobe64(m->m_epg_seqno); 270 ad->type = hdr->tls_type; 271 ad->tls_vmajor = hdr->tls_vmajor; 272 ad->tls_vminor = hdr->tls_vminor; 273 ad->tls_length = htons(tls_comp_len); 274 275 /* First, compute the MAC. */ 276 iov[0].iov_base = ad; 277 iov[0].iov_len = sizeof(*ad); 278 pgoff = m->m_epg_1st_off; 279 for (i = 0; i < m->m_epg_npgs; i++, pgoff = 0) { 280 iov[i + 1].iov_base = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] + 281 pgoff); 282 iov[i + 1].iov_len = m_epg_pagelen(m, i, pgoff); 283 } 284 iov[m->m_epg_npgs + 1].iov_base = m->m_epg_trail; 285 iov[m->m_epg_npgs + 1].iov_len = os->mac_len; 286 uio->uio_iov = iov; 287 uio->uio_iovcnt = m->m_epg_npgs + 2; 288 uio->uio_offset = 0; 289 uio->uio_segflg = UIO_SYSSPACE; 290 uio->uio_td = curthread; 291 uio->uio_resid = sizeof(*ad) + tls_comp_len + os->mac_len; 292 293 crypto_initreq(crp, os->mac_sid); 294 crp->crp_payload_start = 0; 295 crp->crp_payload_length = sizeof(*ad) + tls_comp_len; 296 crp->crp_digest_start = crp->crp_payload_length; 297 crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST; 298 crp->crp_flags = CRYPTO_F_CBIMM; 299 crypto_use_uio(crp, uio); 300 error = ktls_ocf_dispatch(os, crp); 301 302 crypto_destroyreq(crp); 303 if (error) { 304 #ifdef INVARIANTS 305 if (os->implicit_iv) { 306 mtx_lock(&os->lock); 
307 os->in_progress = false; 308 mtx_unlock(&os->lock); 309 } 310 #endif 311 return (error); 312 } 313 314 /* Second, add the padding. */ 315 pad = m->m_epg_trllen - os->mac_len - 1; 316 for (i = 0; i < pad + 1; i++) 317 m->m_epg_trail[os->mac_len + i] = pad; 318 319 /* Finally, encrypt the record. */ 320 crypto_initreq(crp, os->sid); 321 crp->crp_payload_start = m->m_epg_hdrlen; 322 crp->crp_payload_length = tls_comp_len + m->m_epg_trllen; 323 KASSERT(crp->crp_payload_length % AES_BLOCK_LEN == 0, 324 ("invalid encryption size")); 325 crypto_use_single_mbuf(crp, m); 326 crp->crp_op = CRYPTO_OP_ENCRYPT; 327 crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE; 328 if (os->implicit_iv) 329 memcpy(crp->crp_iv, os->iv, AES_BLOCK_LEN); 330 else 331 memcpy(crp->crp_iv, hdr + 1, AES_BLOCK_LEN); 332 333 if (outiov != NULL) { 334 uio->uio_iov = outiov; 335 uio->uio_iovcnt = outiovcnt; 336 uio->uio_offset = 0; 337 uio->uio_segflg = UIO_SYSSPACE; 338 uio->uio_td = curthread; 339 uio->uio_resid = crp->crp_payload_length; 340 crypto_use_output_uio(crp, uio); 341 } 342 343 if (os->implicit_iv) 344 counter_u64_add(ocf_tls10_cbc_crypts, 1); 345 else 346 counter_u64_add(ocf_tls11_cbc_crypts, 1); 347 if (outiov != NULL) 348 counter_u64_add(ocf_separate_output, 1); 349 else 350 counter_u64_add(ocf_inplace, 1); 351 error = ktls_ocf_dispatch(os, crp); 352 353 crypto_destroyreq(crp); 354 355 if (os->implicit_iv) { 356 KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN, 357 ("trailer too short to read IV")); 358 memcpy(os->iv, m->m_epg_trail + m->m_epg_trllen - AES_BLOCK_LEN, 359 AES_BLOCK_LEN); 360 #ifdef INVARIANTS 361 mtx_lock(&os->lock); 362 os->next_seqno = m->m_epg_seqno + 1; 363 os->in_progress = false; 364 mtx_unlock(&os->lock); 365 #endif 366 } 367 return (error); 368 } 369 370 static int 371 ktls_ocf_tls12_aead_encrypt(struct ktls_ocf_encrypt_state *state, 372 struct ktls_session *tls, struct mbuf *m, struct iovec *outiov, 373 int outiovcnt) 374 { 375 const struct 
tls_record_layer *hdr; 376 struct uio *uio; 377 struct tls_aead_data *ad; 378 struct cryptop *crp; 379 struct ktls_ocf_session *os; 380 int error; 381 uint16_t tls_comp_len; 382 383 os = tls->ocf_session; 384 hdr = (const struct tls_record_layer *)m->m_epg_hdr; 385 crp = &state->crp; 386 uio = &state->uio; 387 388 crypto_initreq(crp, os->sid); 389 390 /* Setup the IV. */ 391 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) { 392 memcpy(crp->crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN); 393 memcpy(crp->crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, 394 sizeof(uint64_t)); 395 } else { 396 /* 397 * Chacha20-Poly1305 constructs the IV for TLS 1.2 398 * identically to constructing the IV for AEAD in TLS 399 * 1.3. 400 */ 401 memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len); 402 *(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno); 403 } 404 405 /* Setup the AAD. */ 406 ad = &state->aead; 407 tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen); 408 ad->seq = htobe64(m->m_epg_seqno); 409 ad->type = hdr->tls_type; 410 ad->tls_vmajor = hdr->tls_vmajor; 411 ad->tls_vminor = hdr->tls_vminor; 412 ad->tls_length = htons(tls_comp_len); 413 crp->crp_aad = ad; 414 crp->crp_aad_length = sizeof(*ad); 415 416 /* Set fields for input payload. 
*/ 417 crypto_use_single_mbuf(crp, m); 418 crp->crp_payload_start = m->m_epg_hdrlen; 419 crp->crp_payload_length = tls_comp_len; 420 421 if (outiov != NULL) { 422 crp->crp_digest_start = crp->crp_payload_length; 423 424 uio->uio_iov = outiov; 425 uio->uio_iovcnt = outiovcnt; 426 uio->uio_offset = 0; 427 uio->uio_segflg = UIO_SYSSPACE; 428 uio->uio_td = curthread; 429 uio->uio_resid = crp->crp_payload_length + tls->params.tls_tlen; 430 crypto_use_output_uio(crp, uio); 431 } else 432 crp->crp_digest_start = crp->crp_payload_start + 433 crp->crp_payload_length; 434 435 crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST; 436 crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE; 437 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) 438 counter_u64_add(ocf_tls12_gcm_crypts, 1); 439 else 440 counter_u64_add(ocf_tls12_chacha20_crypts, 1); 441 if (outiov != NULL) 442 counter_u64_add(ocf_separate_output, 1); 443 else 444 counter_u64_add(ocf_inplace, 1); 445 if (tls->sync_dispatch) { 446 error = ktls_ocf_dispatch(os, crp); 447 crypto_destroyreq(crp); 448 } else 449 error = ktls_ocf_dispatch_async(state, crp); 450 return (error); 451 } 452 453 static int 454 ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls, 455 const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno, 456 int *trailer_len) 457 { 458 struct tls_aead_data ad; 459 struct cryptop crp; 460 struct ktls_ocf_session *os; 461 struct ocf_operation oo; 462 int error; 463 uint16_t tls_comp_len; 464 465 os = tls->ocf_session; 466 467 oo.os = os; 468 oo.done = false; 469 470 crypto_initreq(&crp, os->sid); 471 472 /* Setup the IV. */ 473 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) { 474 memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN); 475 memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, 476 sizeof(uint64_t)); 477 } else { 478 /* 479 * Chacha20-Poly1305 constructs the IV for TLS 1.2 480 * identically to constructing the IV for AEAD in TLS 481 * 1.3. 
482 */ 483 memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len); 484 *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno); 485 } 486 487 /* Setup the AAD. */ 488 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) 489 tls_comp_len = ntohs(hdr->tls_length) - 490 (AES_GMAC_HASH_LEN + sizeof(uint64_t)); 491 else 492 tls_comp_len = ntohs(hdr->tls_length) - POLY1305_HASH_LEN; 493 ad.seq = htobe64(seqno); 494 ad.type = hdr->tls_type; 495 ad.tls_vmajor = hdr->tls_vmajor; 496 ad.tls_vminor = hdr->tls_vminor; 497 ad.tls_length = htons(tls_comp_len); 498 crp.crp_aad = &ad; 499 crp.crp_aad_length = sizeof(ad); 500 501 crp.crp_payload_start = tls->params.tls_hlen; 502 crp.crp_payload_length = tls_comp_len; 503 crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length; 504 505 crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST; 506 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE; 507 crypto_use_mbuf(&crp, m); 508 509 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) 510 counter_u64_add(ocf_tls12_gcm_crypts, 1); 511 else 512 counter_u64_add(ocf_tls12_chacha20_crypts, 1); 513 error = ktls_ocf_dispatch(os, &crp); 514 515 crypto_destroyreq(&crp); 516 *trailer_len = tls->params.tls_tlen; 517 return (error); 518 } 519 520 static int 521 ktls_ocf_tls13_aead_encrypt(struct ktls_ocf_encrypt_state *state, 522 struct ktls_session *tls, struct mbuf *m, struct iovec *outiov, 523 int outiovcnt) 524 { 525 const struct tls_record_layer *hdr; 526 struct uio *uio; 527 struct tls_aead_data_13 *ad; 528 struct cryptop *crp; 529 struct ktls_ocf_session *os; 530 char nonce[12]; 531 int error; 532 533 os = tls->ocf_session; 534 hdr = (const struct tls_record_layer *)m->m_epg_hdr; 535 crp = &state->crp; 536 uio = &state->uio; 537 538 crypto_initreq(crp, os->sid); 539 540 /* Setup the nonce. */ 541 memcpy(nonce, tls->params.iv, tls->params.iv_len); 542 *(uint64_t *)(nonce + 4) ^= htobe64(m->m_epg_seqno); 543 544 /* Setup the AAD. 
*/ 545 ad = &state->aead13; 546 ad->type = hdr->tls_type; 547 ad->tls_vmajor = hdr->tls_vmajor; 548 ad->tls_vminor = hdr->tls_vminor; 549 ad->tls_length = hdr->tls_length; 550 crp->crp_aad = ad; 551 crp->crp_aad_length = sizeof(*ad); 552 553 /* Set fields for input payload. */ 554 crypto_use_single_mbuf(crp, m); 555 crp->crp_payload_start = m->m_epg_hdrlen; 556 crp->crp_payload_length = m->m_len - 557 (m->m_epg_hdrlen + m->m_epg_trllen); 558 559 /* Store the record type as the first byte of the trailer. */ 560 m->m_epg_trail[0] = m->m_epg_record_type; 561 crp->crp_payload_length++; 562 563 if (outiov != NULL) { 564 crp->crp_digest_start = crp->crp_payload_length; 565 566 uio->uio_iov = outiov; 567 uio->uio_iovcnt = outiovcnt; 568 uio->uio_offset = 0; 569 uio->uio_segflg = UIO_SYSSPACE; 570 uio->uio_td = curthread; 571 uio->uio_resid = m->m_len - m->m_epg_hdrlen; 572 crypto_use_output_uio(crp, uio); 573 } else 574 crp->crp_digest_start = crp->crp_payload_start + 575 crp->crp_payload_length; 576 577 crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST; 578 crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE; 579 580 memcpy(crp->crp_iv, nonce, sizeof(nonce)); 581 582 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) 583 counter_u64_add(ocf_tls13_gcm_crypts, 1); 584 else 585 counter_u64_add(ocf_tls13_chacha20_crypts, 1); 586 if (outiov != NULL) 587 counter_u64_add(ocf_separate_output, 1); 588 else 589 counter_u64_add(ocf_inplace, 1); 590 if (tls->sync_dispatch) { 591 error = ktls_ocf_dispatch(os, crp); 592 crypto_destroyreq(crp); 593 } else 594 error = ktls_ocf_dispatch_async(state, crp); 595 return (error); 596 } 597 598 void 599 ktls_ocf_free(struct ktls_session *tls) 600 { 601 struct ktls_ocf_session *os; 602 603 os = tls->ocf_session; 604 crypto_freesession(os->sid); 605 mtx_destroy(&os->lock); 606 zfree(os, M_KTLS_OCF); 607 } 608 609 int 610 ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction) 611 { 612 struct 
crypto_session_params csp, mac_csp; 613 struct ktls_ocf_session *os; 614 int error, mac_len; 615 616 memset(&csp, 0, sizeof(csp)); 617 memset(&mac_csp, 0, sizeof(mac_csp)); 618 mac_csp.csp_mode = CSP_MODE_NONE; 619 mac_len = 0; 620 621 switch (tls->params.cipher_algorithm) { 622 case CRYPTO_AES_NIST_GCM_16: 623 switch (tls->params.cipher_key_len) { 624 case 128 / 8: 625 case 256 / 8: 626 break; 627 default: 628 return (EINVAL); 629 } 630 631 /* Only TLS 1.2 and 1.3 are supported. */ 632 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE || 633 tls->params.tls_vminor < TLS_MINOR_VER_TWO || 634 tls->params.tls_vminor > TLS_MINOR_VER_THREE) 635 return (EPROTONOSUPPORT); 636 637 /* TLS 1.3 is not yet supported for receive. */ 638 if (direction == KTLS_RX && 639 tls->params.tls_vminor == TLS_MINOR_VER_THREE) 640 return (EPROTONOSUPPORT); 641 642 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD; 643 csp.csp_mode = CSP_MODE_AEAD; 644 csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16; 645 csp.csp_cipher_key = tls->params.cipher_key; 646 csp.csp_cipher_klen = tls->params.cipher_key_len; 647 csp.csp_ivlen = AES_GCM_IV_LEN; 648 break; 649 case CRYPTO_AES_CBC: 650 switch (tls->params.cipher_key_len) { 651 case 128 / 8: 652 case 256 / 8: 653 break; 654 default: 655 return (EINVAL); 656 } 657 658 switch (tls->params.auth_algorithm) { 659 case CRYPTO_SHA1_HMAC: 660 mac_len = SHA1_HASH_LEN; 661 break; 662 case CRYPTO_SHA2_256_HMAC: 663 mac_len = SHA2_256_HASH_LEN; 664 break; 665 case CRYPTO_SHA2_384_HMAC: 666 mac_len = SHA2_384_HASH_LEN; 667 break; 668 default: 669 return (EINVAL); 670 } 671 672 /* Only TLS 1.0-1.2 are supported. */ 673 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE || 674 tls->params.tls_vminor < TLS_MINOR_VER_ZERO || 675 tls->params.tls_vminor > TLS_MINOR_VER_TWO) 676 return (EPROTONOSUPPORT); 677 678 /* AES-CBC is not supported for receive. 
*/ 679 if (direction == KTLS_RX) 680 return (EPROTONOSUPPORT); 681 682 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT; 683 csp.csp_mode = CSP_MODE_CIPHER; 684 csp.csp_cipher_alg = CRYPTO_AES_CBC; 685 csp.csp_cipher_key = tls->params.cipher_key; 686 csp.csp_cipher_klen = tls->params.cipher_key_len; 687 csp.csp_ivlen = AES_BLOCK_LEN; 688 689 mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT; 690 mac_csp.csp_mode = CSP_MODE_DIGEST; 691 mac_csp.csp_auth_alg = tls->params.auth_algorithm; 692 mac_csp.csp_auth_key = tls->params.auth_key; 693 mac_csp.csp_auth_klen = tls->params.auth_key_len; 694 break; 695 case CRYPTO_CHACHA20_POLY1305: 696 switch (tls->params.cipher_key_len) { 697 case 256 / 8: 698 break; 699 default: 700 return (EINVAL); 701 } 702 703 /* Only TLS 1.2 and 1.3 are supported. */ 704 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE || 705 tls->params.tls_vminor < TLS_MINOR_VER_TWO || 706 tls->params.tls_vminor > TLS_MINOR_VER_THREE) 707 return (EPROTONOSUPPORT); 708 709 /* TLS 1.3 is not yet supported for receive. 
*/ 710 if (direction == KTLS_RX && 711 tls->params.tls_vminor == TLS_MINOR_VER_THREE) 712 return (EPROTONOSUPPORT); 713 714 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD; 715 csp.csp_mode = CSP_MODE_AEAD; 716 csp.csp_cipher_alg = CRYPTO_CHACHA20_POLY1305; 717 csp.csp_cipher_key = tls->params.cipher_key; 718 csp.csp_cipher_klen = tls->params.cipher_key_len; 719 csp.csp_ivlen = CHACHA20_POLY1305_IV_LEN; 720 break; 721 default: 722 return (EPROTONOSUPPORT); 723 } 724 725 os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO); 726 if (os == NULL) 727 return (ENOMEM); 728 729 error = crypto_newsession(&os->sid, &csp, 730 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE); 731 if (error) { 732 free(os, M_KTLS_OCF); 733 return (error); 734 } 735 736 if (mac_csp.csp_mode != CSP_MODE_NONE) { 737 error = crypto_newsession(&os->mac_sid, &mac_csp, 738 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE); 739 if (error) { 740 crypto_freesession(os->sid); 741 free(os, M_KTLS_OCF); 742 return (error); 743 } 744 os->mac_len = mac_len; 745 } 746 747 mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF); 748 tls->ocf_session = os; 749 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 || 750 tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) { 751 if (direction == KTLS_TX) { 752 if (tls->params.tls_vminor == TLS_MINOR_VER_THREE) 753 tls->sw_encrypt = ktls_ocf_tls13_aead_encrypt; 754 else 755 tls->sw_encrypt = ktls_ocf_tls12_aead_encrypt; 756 } else { 757 tls->sw_decrypt = ktls_ocf_tls12_aead_decrypt; 758 } 759 } else { 760 tls->sw_encrypt = ktls_ocf_tls_cbc_encrypt; 761 if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) { 762 os->implicit_iv = true; 763 memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN); 764 #ifdef INVARIANTS 765 os->next_seqno = tls->next_seqno; 766 #endif 767 } 768 } 769 770 /* 771 * AES-CBC is always synchronous currently. Asynchronous 772 * operation would require multiple callbacks and an additional 773 * iovec array in ktls_ocf_encrypt_state. 
774 */ 775 tls->sync_dispatch = CRYPTO_SESS_SYNC(os->sid) || 776 tls->params.cipher_algorithm == CRYPTO_AES_CBC; 777 return (0); 778 } 779