/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/ktls.h>

struct ktls_ocf_session {
	crypto_session_t sid;
	crypto_session_t mac_sid;
	struct mtx lock;
	int mac_len;
	bool implicit_iv;

	/* Only used for TLS 1.0 with the implicit IV. */
#ifdef INVARIANTS
	bool in_progress;
	uint64_t next_seqno;
#endif
	char iv[AES_BLOCK_LEN];
};

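/*
 * Tracks completion of a single synchronously-awaited crypto request.
 * The async callback marks the operation done under the session lock
 * and wakes the thread sleeping in ktls_ocf_dispatch().
 */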
struct ocf_operation {
	struct ktls_ocf_session *os;
	bool done;
};

static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");

SYSCTL_DECL(_kern_ipc_tls);
SYSCTL_DECL(_kern_ipc_tls_stats);

static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload via OCF stats");

static COUNTER_U64_DEFINE_EARLY(ocf_tls10_cbc_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_encrypts,
    CTLFLAG_RD, &ocf_tls10_cbc_encrypts,
    "Total number of OCF TLS 1.0 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_encrypts,
    CTLFLAG_RD, &ocf_tls11_cbc_encrypts,
    "Total number of OCF TLS 1.1/1.2 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_decrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_decrypts,
    "Total number of OCF TLS 1.2 GCM decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_encrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_encrypts,
    "Total number of OCF TLS 1.2 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_decrypts,
    CTLFLAG_RD, &ocf_tls12_chacha20_decrypts,
    "Total number of OCF TLS 1.2 Chacha20-Poly1305 decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_encrypts,
    CTLFLAG_RD, &ocf_tls12_chacha20_encrypts,
    "Total number of OCF TLS 1.2 Chacha20-Poly1305 encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_encrypts,
    CTLFLAG_RD, &ocf_tls13_gcm_encrypts,
    "Total number of OCF TLS 1.3 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_encrypts,
    CTLFLAG_RD, &ocf_tls13_chacha20_encrypts,
    "Total number of OCF TLS 1.3 Chacha20-Poly1305 encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_inplace);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
    CTLFLAG_RD, &ocf_inplace,
    "Total number of OCF in-place operations");

static COUNTER_U64_DEFINE_EARLY(ocf_separate_output);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
    CTLFLAG_RD, &ocf_separate_output,
    "Total number of OCF operations with a separate output buffer");

static COUNTER_U64_DEFINE_EARLY(ocf_retries);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
    &ocf_retries,
    "Number of OCF encryption operation retries");

static int
ktls_ocf_callback_sync(struct cryptop *crp __unused)
{
	return (0);
}

static int
ktls_ocf_callback_async(struct cryptop *crp)
{
	struct ocf_operation *oo;

	oo = crp->crp_opaque;
	mtx_lock(&oo->os->lock);
	oo->done = true;
	mtx_unlock(&oo->os->lock);
	wakeup(oo);
	return (0);
}

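/*
 * Dispatch a request to OCF and wait for it to complete.  Requests on
 * asynchronous sessions are slept on until the callback fires; requests
 * that complete with EAGAIN are reset and resubmitted.
 */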
static int
ktls_ocf_dispatch(struct ktls_ocf_session *os, struct cryptop *crp)
{
	struct ocf_operation oo;
	int error;
	bool async;

	oo.os = os;
	oo.done = false;

	crp->crp_opaque = &oo;
	for (;;) {
		async = !CRYPTO_SESS_SYNC(crp->crp_session);
		crp->crp_callback = async ? ktls_ocf_callback_async :
		    ktls_ocf_callback_sync;

		error = crypto_dispatch(crp);
		if (error)
			break;
		if (async) {
			mtx_lock(&os->lock);
			while (!oo.done)
				mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
			mtx_unlock(&os->lock);
		}

		if (crp->crp_etype != EAGAIN) {
			error = crp->crp_etype;
			break;
		}

		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		oo.done = false;
		counter_u64_add(ocf_retries, 1);
	}
	return (error);
}

static int
ktls_ocf_dispatch_async_cb(struct cryptop *crp)
{
	struct ktls_ocf_encrypt_state *state;
	int error;

	state = crp->crp_opaque;
	if (crp->crp_etype == EAGAIN) {
		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		counter_u64_add(ocf_retries, 1);
		error = crypto_dispatch(crp);
		if (error != 0) {
			crypto_destroyreq(crp);
			ktls_encrypt_cb(state, error);
		}
		return (0);
	}

	error = crp->crp_etype;
	crypto_destroyreq(crp);
	ktls_encrypt_cb(state, error);
	return (0);
}

static int
ktls_ocf_dispatch_async(struct ktls_ocf_encrypt_state *state,
    struct cryptop *crp)
{
	int error;

	crp->crp_opaque = state;
	crp->crp_callback = ktls_ocf_dispatch_async_cb;
	error = crypto_dispatch(crp);
	if (error != 0)
		crypto_destroyreq(crp);
	return (error);
}

static int
ktls_ocf_tls_cbc_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	const struct tls_record_layer *hdr;
	struct uio *uio;
	struct tls_mac_data *ad;
	struct cryptop *crp;
	struct ktls_ocf_session *os;
	struct iovec iov[m->m_epg_npgs + 2];
	u_int pgoff;
	int i, error;
	uint16_t tls_comp_len;
	uint8_t pad;

	MPASS(outiovcnt + 1 <= nitems(iov));

	os = tls->ocf_session;
	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
	crp = &state->crp;
	uio = &state->uio;
	MPASS(tls->sync_dispatch);

#ifdef INVARIANTS
	if (os->implicit_iv) {
		mtx_lock(&os->lock);
		KASSERT(!os->in_progress,
		    ("concurrent implicit IV encryptions"));
		if (os->next_seqno != m->m_epg_seqno) {
			printf("KTLS CBC: TLS records out of order. "
			    "Expected %ju, got %ju\n",
			    (uintmax_t)os->next_seqno,
			    (uintmax_t)m->m_epg_seqno);
			mtx_unlock(&os->lock);
			return (EINVAL);
		}
		os->in_progress = true;
		mtx_unlock(&os->lock);
	}
#endif

	/* Payload length. */
	tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);

	/* Initialize the AAD. */
	ad = &state->mac;
	ad->seq = htobe64(m->m_epg_seqno);
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = htons(tls_comp_len);

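	/*
	 * TLS CBC cipher suites use MAC-then-encrypt: the HMAC is
	 * computed over the pseudo-header above and the plaintext, and
	 * then the plaintext, MAC, and padding are encrypted together.
	 */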
	/* First, compute the MAC. */
	iov[0].iov_base = ad;
	iov[0].iov_len = sizeof(*ad);
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs; i++, pgoff = 0) {
		iov[i + 1].iov_base = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] +
		    pgoff);
		iov[i + 1].iov_len = m_epg_pagelen(m, i, pgoff);
	}
	iov[m->m_epg_npgs + 1].iov_base = m->m_epg_trail;
	iov[m->m_epg_npgs + 1].iov_len = os->mac_len;
	uio->uio_iov = iov;
	uio->uio_iovcnt = m->m_epg_npgs + 2;
	uio->uio_offset = 0;
	uio->uio_segflg = UIO_SYSSPACE;
	uio->uio_td = curthread;
	uio->uio_resid = sizeof(*ad) + tls_comp_len + os->mac_len;

	crypto_initreq(crp, os->mac_sid);
	crp->crp_payload_start = 0;
	crp->crp_payload_length = sizeof(*ad) + tls_comp_len;
	crp->crp_digest_start = crp->crp_payload_length;
	crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM;
	crypto_use_uio(crp, uio);
	error = ktls_ocf_dispatch(os, crp);

	crypto_destroyreq(crp);
	if (error) {
#ifdef INVARIANTS
		if (os->implicit_iv) {
			mtx_lock(&os->lock);
			os->in_progress = false;
			mtx_unlock(&os->lock);
		}
#endif
		return (error);
	}

	/* Second, add the padding. */
	pad = m->m_epg_trllen - os->mac_len - 1;
	for (i = 0; i < pad + 1; i++)
		m->m_epg_trail[os->mac_len + i] = pad;

	/* Finally, encrypt the record. */
	crypto_initreq(crp, os->sid);
	crp->crp_payload_start = m->m_epg_hdrlen;
	crp->crp_payload_length = tls_comp_len + m->m_epg_trllen;
	KASSERT(crp->crp_payload_length % AES_BLOCK_LEN == 0,
	    ("invalid encryption size"));
	crypto_use_single_mbuf(crp, m);
	crp->crp_op = CRYPTO_OP_ENCRYPT;
	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	if (os->implicit_iv)
		memcpy(crp->crp_iv, os->iv, AES_BLOCK_LEN);
	else
		memcpy(crp->crp_iv, hdr + 1, AES_BLOCK_LEN);

	if (outiov != NULL) {
		uio->uio_iov = outiov;
		uio->uio_iovcnt = outiovcnt;
		uio->uio_offset = 0;
		uio->uio_segflg = UIO_SYSSPACE;
		uio->uio_td = curthread;
		uio->uio_resid = crp->crp_payload_length;
		crypto_use_output_uio(crp, uio);
	}

	if (os->implicit_iv)
		counter_u64_add(ocf_tls10_cbc_encrypts, 1);
	else
		counter_u64_add(ocf_tls11_cbc_encrypts, 1);
	if (outiov != NULL)
		counter_u64_add(ocf_separate_output, 1);
	else
		counter_u64_add(ocf_inplace, 1);
	error = ktls_ocf_dispatch(os, crp);

	crypto_destroyreq(crp);

	if (os->implicit_iv) {
		KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
		    ("trailer too short to read IV"));
		memcpy(os->iv, m->m_epg_trail + m->m_epg_trllen - AES_BLOCK_LEN,
		    AES_BLOCK_LEN);
#ifdef INVARIANTS
		mtx_lock(&os->lock);
		os->next_seqno = m->m_epg_seqno + 1;
		os->in_progress = false;
		mtx_unlock(&os->lock);
#endif
	}
	return (error);
}

static int
ktls_ocf_tls12_aead_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	const struct tls_record_layer *hdr;
	struct uio *uio;
	struct tls_aead_data *ad;
	struct cryptop *crp;
	struct ktls_ocf_session *os;
	int error;
	uint16_t tls_comp_len;

	os = tls->ocf_session;
	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
	crp = &state->crp;
	uio = &state->uio;

	crypto_initreq(crp, os->sid);

	/* Setup the IV. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
		memcpy(crp->crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
		memcpy(crp->crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
		    sizeof(uint64_t));
	} else {
		/*
		 * Chacha20-Poly1305 constructs the IV for TLS 1.2
		 * identically to constructing the IV for AEAD in TLS
		 * 1.3.
		 */
		memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
		*(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);
	}

	/* Setup the AAD. */
	ad = &state->aead;
	tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
	ad->seq = htobe64(m->m_epg_seqno);
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = htons(tls_comp_len);
	crp->crp_aad = ad;
	crp->crp_aad_length = sizeof(*ad);

	/* Set fields for input payload. */
	crypto_use_single_mbuf(crp, m);
	crp->crp_payload_start = m->m_epg_hdrlen;
	crp->crp_payload_length = tls_comp_len;

	if (outiov != NULL) {
		crp->crp_digest_start = crp->crp_payload_length;

		uio->uio_iov = outiov;
		uio->uio_iovcnt = outiovcnt;
		uio->uio_offset = 0;
		uio->uio_segflg = UIO_SYSSPACE;
		uio->uio_td = curthread;
		uio->uio_resid = crp->crp_payload_length + tls->params.tls_tlen;
		crypto_use_output_uio(crp, uio);
	} else
		crp->crp_digest_start = crp->crp_payload_start +
		    crp->crp_payload_length;

	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls12_gcm_encrypts, 1);
	else
		counter_u64_add(ocf_tls12_chacha20_encrypts, 1);
	if (outiov != NULL)
		counter_u64_add(ocf_separate_output, 1);
	else
		counter_u64_add(ocf_inplace, 1);
	if (tls->sync_dispatch) {
		error = ktls_ocf_dispatch(os, crp);
		crypto_destroyreq(crp);
	} else
		error = ktls_ocf_dispatch_async(state, crp);
	return (error);
}

static int
ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
	struct tls_aead_data ad;
	struct cryptop crp;
	struct ktls_ocf_session *os;
	int error;
	uint16_t tls_comp_len;

	os = tls->ocf_session;

	crypto_initreq(&crp, os->sid);

	/* Setup the IV. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
		memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
		memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
		    sizeof(uint64_t));
	} else {
		/*
		 * Chacha20-Poly1305 constructs the IV for TLS 1.2
		 * identically to constructing the IV for AEAD in TLS
		 * 1.3.
		 */
		memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
		*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
	}

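	/*
	 * The length in the AAD must cover only the plaintext, so the
	 * explicit nonce (for GCM) and the authentication tag are
	 * subtracted from the length in the record header.
	 */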
	/* Setup the AAD. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		tls_comp_len = ntohs(hdr->tls_length) -
		    (AES_GMAC_HASH_LEN + sizeof(uint64_t));
	else
		tls_comp_len = ntohs(hdr->tls_length) - POLY1305_HASH_LEN;
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = tls_comp_len;
	crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;

	crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls12_gcm_decrypts, 1);
	else
		counter_u64_add(ocf_tls12_chacha20_decrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	*trailer_len = tls->params.tls_tlen;
	return (error);
}

static int
ktls_ocf_tls13_aead_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	const struct tls_record_layer *hdr;
	struct uio *uio;
	struct tls_aead_data_13 *ad;
	struct cryptop *crp;
	struct ktls_ocf_session *os;
	char nonce[12];
	int error;

	os = tls->ocf_session;
	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
	crp = &state->crp;
	uio = &state->uio;

	crypto_initreq(crp, os->sid);

	/* Setup the nonce. */
	memcpy(nonce, tls->params.iv, tls->params.iv_len);
	*(uint64_t *)(nonce + 4) ^= htobe64(m->m_epg_seqno);

	/* Setup the AAD. */
	ad = &state->aead13;
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = hdr->tls_length;
	crp->crp_aad = ad;
	crp->crp_aad_length = sizeof(*ad);

	/* Set fields for input payload. */
	crypto_use_single_mbuf(crp, m);
	crp->crp_payload_start = m->m_epg_hdrlen;
	crp->crp_payload_length = m->m_len -
	    (m->m_epg_hdrlen + m->m_epg_trllen);

	/* Store the record type as the first byte of the trailer. */
	m->m_epg_trail[0] = m->m_epg_record_type;
	crp->crp_payload_length++;

	if (outiov != NULL) {
		crp->crp_digest_start = crp->crp_payload_length;

		uio->uio_iov = outiov;
		uio->uio_iovcnt = outiovcnt;
		uio->uio_offset = 0;
		uio->uio_segflg = UIO_SYSSPACE;
		uio->uio_td = curthread;
		uio->uio_resid = m->m_len - m->m_epg_hdrlen;
		crypto_use_output_uio(crp, uio);
	} else
		crp->crp_digest_start = crp->crp_payload_start +
		    crp->crp_payload_length;

	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;

	memcpy(crp->crp_iv, nonce, sizeof(nonce));

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls13_gcm_encrypts, 1);
	else
		counter_u64_add(ocf_tls13_chacha20_encrypts, 1);
	if (outiov != NULL)
		counter_u64_add(ocf_separate_output, 1);
	else
		counter_u64_add(ocf_inplace, 1);
	if (tls->sync_dispatch) {
		error = ktls_ocf_dispatch(os, crp);
		crypto_destroyreq(crp);
	} else
		error = ktls_ocf_dispatch_async(state, crp);
	return (error);
}

void
ktls_ocf_free(struct ktls_session *tls)
{
	struct ktls_ocf_session *os;

	os = tls->ocf_session;
	crypto_freesession(os->sid);
	mtx_destroy(&os->lock);
	zfree(os, M_KTLS_OCF);
}

int
ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
{
	struct crypto_session_params csp, mac_csp;
	struct ktls_ocf_session *os;
	int error, mac_len;

	memset(&csp, 0, sizeof(csp));
	memset(&mac_csp, 0, sizeof(mac_csp));
	mac_csp.csp_mode = CSP_MODE_NONE;
	mac_len = 0;

	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.2 and 1.3 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		/* TLS 1.3 is not yet supported for receive. */
		if (direction == KTLS_RX &&
		    tls->params.tls_vminor == TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
		csp.csp_mode = CSP_MODE_AEAD;
		csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = AES_GCM_IV_LEN;
		break;
	case CRYPTO_AES_CBC:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		switch (tls->params.auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			mac_len = SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256_HMAC:
			mac_len = SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384_HMAC:
			mac_len = SHA2_384_HASH_LEN;
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.0-1.2 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_ZERO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_TWO)
			return (EPROTONOSUPPORT);

		/* AES-CBC is not supported for receive. */
		if (direction == KTLS_RX)
			return (EPROTONOSUPPORT);

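		/*
		 * MAC-then-encrypt CBC suites need two OCF sessions: a
		 * cipher-only session for AES-CBC and a separate digest
		 * session for the HMAC.
		 */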
		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		csp.csp_mode = CSP_MODE_CIPHER;
		csp.csp_cipher_alg = CRYPTO_AES_CBC;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = AES_BLOCK_LEN;

		mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		mac_csp.csp_mode = CSP_MODE_DIGEST;
		mac_csp.csp_auth_alg = tls->params.auth_algorithm;
		mac_csp.csp_auth_key = tls->params.auth_key;
		mac_csp.csp_auth_klen = tls->params.auth_key_len;
		break;
	case CRYPTO_CHACHA20_POLY1305:
		switch (tls->params.cipher_key_len) {
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.2 and 1.3 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		/* TLS 1.3 is not yet supported for receive. */
		if (direction == KTLS_RX &&
		    tls->params.tls_vminor == TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
		csp.csp_mode = CSP_MODE_AEAD;
		csp.csp_cipher_alg = CRYPTO_CHACHA20_POLY1305;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = CHACHA20_POLY1305_IV_LEN;
		break;
	default:
		return (EPROTONOSUPPORT);
	}

	os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO);
	if (os == NULL)
		return (ENOMEM);

	error = crypto_newsession(&os->sid, &csp,
	    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
	if (error) {
		free(os, M_KTLS_OCF);
		return (error);
	}

	if (mac_csp.csp_mode != CSP_MODE_NONE) {
		error = crypto_newsession(&os->mac_sid, &mac_csp,
		    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
		if (error) {
			crypto_freesession(os->sid);
			free(os, M_KTLS_OCF);
			return (error);
		}
		os->mac_len = mac_len;
	}

	mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
	tls->ocf_session = os;
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 ||
	    tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) {
		if (direction == KTLS_TX) {
			if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
				tls->sw_encrypt = ktls_ocf_tls13_aead_encrypt;
			else
				tls->sw_encrypt = ktls_ocf_tls12_aead_encrypt;
		} else {
			tls->sw_decrypt = ktls_ocf_tls12_aead_decrypt;
		}
	} else {
		tls->sw_encrypt = ktls_ocf_tls_cbc_encrypt;
		if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) {
			os->implicit_iv = true;
			memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
#ifdef INVARIANTS
			os->next_seqno = tls->next_seqno;
#endif
		}
	}

	/*
	 * AES-CBC is always synchronous currently. Asynchronous
	 * operation would require multiple callbacks and an additional
	 * iovec array in ktls_ocf_encrypt_state.
	 */
	tls->sync_dispatch = CRYPTO_SESS_SYNC(os->sid) ||
	    tls->params.cipher_algorithm == CRYPTO_AES_CBC;
	return (0);
}