Lines Matching "dma-byte-en"
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2014-2019 Netflix Inc.
132 "Number of TLS threads in thread-pool");
147 "Enable support of AES-CBC crypto for kernel TLS");
236 "Active number of software TLS sessions using AES-CBC");
240 "Active number of software TLS sessions using AES-GCM");
245 "Active number of software TLS sessions using Chacha20-Poly1305");
250 "Active number of ifnet TLS sessions using AES-CBC");
255 "Active number of ifnet TLS sessions using AES-GCM");
260 "Active number of ifnet TLS sessions using Chacha20-Poly1305");
285 "Active number of TOE TLS sessions using AES-CBC");
290 "Active number of TOE TLS sessions using AES-GCM");
295 "Active number of TOE TLS sessions using Chacha20-Poly1305");
312 if (sopt->sopt_valsize == sizeof(tls_v0)) { in ktls_copyin_tls_enable()
317 tls->cipher_key = tls_v0.cipher_key; in ktls_copyin_tls_enable()
318 tls->iv = tls_v0.iv; in ktls_copyin_tls_enable()
319 tls->auth_key = tls_v0.auth_key; in ktls_copyin_tls_enable()
320 tls->cipher_algorithm = tls_v0.cipher_algorithm; in ktls_copyin_tls_enable()
321 tls->cipher_key_len = tls_v0.cipher_key_len; in ktls_copyin_tls_enable()
322 tls->iv_len = tls_v0.iv_len; in ktls_copyin_tls_enable()
323 tls->auth_algorithm = tls_v0.auth_algorithm; in ktls_copyin_tls_enable()
324 tls->auth_key_len = tls_v0.auth_key_len; in ktls_copyin_tls_enable()
325 tls->flags = tls_v0.flags; in ktls_copyin_tls_enable()
326 tls->tls_vmajor = tls_v0.tls_vmajor; in ktls_copyin_tls_enable()
327 tls->tls_vminor = tls_v0.tls_vminor; in ktls_copyin_tls_enable()
334 if (tls->cipher_key_len < 0 || tls->cipher_key_len > TLS_MAX_PARAM_SIZE) in ktls_copyin_tls_enable()
336 if (tls->iv_len < 0 || tls->iv_len > sizeof(((struct ktls_session *)NULL)->params.iv)) in ktls_copyin_tls_enable()
338 if (tls->auth_key_len < 0 || tls->auth_key_len > TLS_MAX_PARAM_SIZE) in ktls_copyin_tls_enable()
342 if (tls->cipher_key_len == 0) in ktls_copyin_tls_enable()
346 * Now do a deep copy of the variable-length arrays in the struct, so that in ktls_copyin_tls_enable()
352 if (tls->cipher_key_len != 0) { in ktls_copyin_tls_enable()
353 cipher_key = malloc(tls->cipher_key_len, M_KTLS, M_WAITOK); in ktls_copyin_tls_enable()
354 if (sopt->sopt_td != NULL) { in ktls_copyin_tls_enable()
355 error = copyin(tls->cipher_key, cipher_key, tls->cipher_key_len); in ktls_copyin_tls_enable()
359 bcopy(tls->cipher_key, cipher_key, tls->cipher_key_len); in ktls_copyin_tls_enable()
362 if (tls->iv_len != 0) { in ktls_copyin_tls_enable()
363 iv = malloc(tls->iv_len, M_KTLS, M_WAITOK); in ktls_copyin_tls_enable()
364 if (sopt->sopt_td != NULL) { in ktls_copyin_tls_enable()
365 error = copyin(tls->iv, iv, tls->iv_len); in ktls_copyin_tls_enable()
369 bcopy(tls->iv, iv, tls->iv_len); in ktls_copyin_tls_enable()
372 if (tls->auth_key_len != 0) { in ktls_copyin_tls_enable()
373 auth_key = malloc(tls->auth_key_len, M_KTLS, M_WAITOK); in ktls_copyin_tls_enable()
374 if (sopt->sopt_td != NULL) { in ktls_copyin_tls_enable()
375 error = copyin(tls->auth_key, auth_key, tls->auth_key_len); in ktls_copyin_tls_enable()
379 bcopy(tls->auth_key, auth_key, tls->auth_key_len); in ktls_copyin_tls_enable()
382 tls->cipher_key = cipher_key; in ktls_copyin_tls_enable()
383 tls->iv = iv; in ktls_copyin_tls_enable()
384 tls->auth_key = auth_key; in ktls_copyin_tls_enable()
399 zfree(__DECONST(void *, tls->cipher_key), M_KTLS); in ktls_cleanup_tls_enable()
400 zfree(__DECONST(void *, tls->iv), M_KTLS); in ktls_cleanup_tls_enable()
401 zfree(__DECONST(void *, tls->auth_key), M_KTLS); in ktls_cleanup_tls_enable()
415 cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype); in ktls_get_cpu()
426 if (ktls_bind_threads > 1 && inp->inp_numa_domain != M_NODOM) { in ktls_get_cpu()
427 di = &ktls_domains[inp->inp_numa_domain]; in ktls_get_cpu()
428 cpuid = di->cpu[inp->inp_flowid % di->count]; in ktls_get_cpu()
431 cpuid = ktls_cpuid_lookup[inp->inp_flowid % ktls_number_threads]; in ktls_get_cpu()
442 ("%s: ktls max length %d is not page size-aligned", in ktls_buffer_import()
476 uma_zfree(ktls_buffer_zone, (void *)PHYS_TO_DMAP(m->m_epg_pa[0])); in ktls_free_mext_contig()
511 domain = pc->pc_domain; in ktls_init()
544 * Start an allocation thread per-domain to perform blocking allocations in ktls_init()
592 state = -1; in ktls_start_kthreads()
599 ktls_create_session(struct socket *so, struct tls_enable *en, in ktls_create_session() argument
605 /* Only TLS 1.0 - 1.3 are supported. */ in ktls_create_session()
606 if (en->tls_vmajor != TLS_MAJOR_VER_ONE) in ktls_create_session()
608 if (en->tls_vminor < TLS_MINOR_VER_ZERO || in ktls_create_session()
609 en->tls_vminor > TLS_MINOR_VER_THREE) in ktls_create_session()
614 if (en->flags != 0) in ktls_create_session()
618 switch (en->cipher_algorithm) { in ktls_create_session()
624 switch (en->auth_algorithm) { in ktls_create_session()
627 /* XXX: Really 13.0-current COMPAT. */ in ktls_create_session()
636 if (en->auth_key_len != 0) in ktls_create_session()
638 switch (en->tls_vminor) { in ktls_create_session()
640 if (en->iv_len != TLS_AEAD_GCM_LEN) in ktls_create_session()
644 if (en->iv_len != TLS_1_3_GCM_IV_LEN) in ktls_create_session()
652 switch (en->auth_algorithm) { in ktls_create_session()
657 if (en->tls_vminor != TLS_MINOR_VER_TWO) in ktls_create_session()
663 if (en->auth_key_len == 0) in ktls_create_session()
670 switch (en->tls_vminor) { in ktls_create_session()
672 if (en->iv_len != TLS_CBC_IMPLICIT_IV_LEN) in ktls_create_session()
678 en->iv_len = 0; in ktls_create_session()
685 if (en->auth_algorithm != 0 || en->auth_key_len != 0) in ktls_create_session()
687 if (en->tls_vminor != TLS_MINOR_VER_TWO && in ktls_create_session()
688 en->tls_vminor != TLS_MINOR_VER_THREE) in ktls_create_session()
690 if (en->iv_len != TLS_CHACHA20_IV_LEN) in ktls_create_session()
705 refcount_init(&tls->refcount, 1); in ktls_create_session()
707 TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_receive_tag, tls); in ktls_create_session()
709 TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_send_tag, tls); in ktls_create_session()
710 tls->inp = so->so_pcb; in ktls_create_session()
711 in_pcbref(tls->inp); in ktls_create_session()
712 tls->tx = true; in ktls_create_session()
715 tls->wq_index = ktls_get_cpu(so); in ktls_create_session()
717 tls->params.cipher_algorithm = en->cipher_algorithm; in ktls_create_session()
718 tls->params.auth_algorithm = en->auth_algorithm; in ktls_create_session()
719 tls->params.tls_vmajor = en->tls_vmajor; in ktls_create_session()
720 tls->params.tls_vminor = en->tls_vminor; in ktls_create_session()
721 tls->params.flags = en->flags; in ktls_create_session()
722 tls->params.max_frame_len = min(TLS_MAX_MSG_SIZE_V10_2, ktls_maxlen); in ktls_create_session()
725 tls->params.tls_hlen = sizeof(struct tls_record_layer); in ktls_create_session()
726 switch (en->cipher_algorithm) { in ktls_create_session()
729 * TLS 1.2 uses a 4 byte implicit IV with an explicit 8 byte in ktls_create_session()
730 * nonce. TLS 1.3 uses a 12 byte implicit IV. in ktls_create_session()
732 if (en->tls_vminor < TLS_MINOR_VER_THREE) in ktls_create_session()
733 tls->params.tls_hlen += sizeof(uint64_t); in ktls_create_session()
734 tls->params.tls_tlen = AES_GMAC_HASH_LEN; in ktls_create_session()
735 tls->params.tls_bs = 1; in ktls_create_session()
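The comment above fixes the AES-GCM record geometry: TLS 1.2 places an explicit 8-byte nonce after the 5-byte record header (the remaining 4 nonce bytes are implicit), while TLS 1.3 derives the full 12-byte nonce implicitly and adds nothing to the header, but appends one inner record-type byte to the trailer. A small standalone sketch of that header/trailer arithmetic, with constant values assumed from their FreeBSD definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define TLS_HDR_LEN       5     /* sizeof(struct tls_record_layer) */
    #define AES_GMAC_HASH_LEN 16    /* GCM authentication tag */

    int
    main(void)
    {
        int vminor;

        for (vminor = 2; vminor <= 3; vminor++) {
            int hlen = TLS_HDR_LEN;

            /* TLS 1.2: explicit 8-byte nonce follows the header. */
            if (vminor < 3)
                hlen += (int)sizeof(uint64_t);
            printf("TLS 1.%d GCM: hlen=%d tlen=%d\n", vminor, hlen,
                AES_GMAC_HASH_LEN + (vminor == 3 ? 1 : 0));
        }
        return (0);
    }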
738 switch (en->auth_algorithm) { in ktls_create_session()
740 if (en->tls_vminor == TLS_MINOR_VER_ZERO) { in ktls_create_session()
742 tls->sequential_records = true; in ktls_create_session()
743 tls->next_seqno = be64dec(en->rec_seq); in ktls_create_session()
744 STAILQ_INIT(&tls->pending_records); in ktls_create_session()
746 tls->params.tls_hlen += AES_BLOCK_LEN; in ktls_create_session()
748 tls->params.tls_tlen = AES_BLOCK_LEN + in ktls_create_session()
752 tls->params.tls_hlen += AES_BLOCK_LEN; in ktls_create_session()
753 tls->params.tls_tlen = AES_BLOCK_LEN + in ktls_create_session()
757 tls->params.tls_hlen += AES_BLOCK_LEN; in ktls_create_session()
758 tls->params.tls_tlen = AES_BLOCK_LEN + in ktls_create_session()
764 tls->params.tls_bs = AES_BLOCK_LEN; in ktls_create_session()
768 * Chacha20 uses a 12 byte implicit IV. in ktls_create_session()
770 tls->params.tls_tlen = POLY1305_HASH_LEN; in ktls_create_session()
771 tls->params.tls_bs = 1; in ktls_create_session()
782 if (en->tls_vminor == TLS_MINOR_VER_THREE) in ktls_create_session()
783 tls->params.tls_tlen += sizeof(uint8_t); in ktls_create_session()
785 KASSERT(tls->params.tls_hlen <= MBUF_PEXT_HDR_LEN, in ktls_create_session()
786 ("TLS header length too long: %d", tls->params.tls_hlen)); in ktls_create_session()
787 KASSERT(tls->params.tls_tlen <= MBUF_PEXT_TRAIL_LEN, in ktls_create_session()
788 ("TLS trailer length too long: %d", tls->params.tls_tlen)); in ktls_create_session()
790 if (en->auth_key_len != 0) { in ktls_create_session()
791 tls->params.auth_key_len = en->auth_key_len; in ktls_create_session()
792 tls->params.auth_key = malloc(en->auth_key_len, M_KTLS, in ktls_create_session()
794 bcopy(en->auth_key, tls->params.auth_key, en->auth_key_len); in ktls_create_session()
797 tls->params.cipher_key_len = en->cipher_key_len; in ktls_create_session()
798 tls->params.cipher_key = malloc(en->cipher_key_len, M_KTLS, M_WAITOK); in ktls_create_session()
799 bcopy(en->cipher_key, tls->params.cipher_key, en->cipher_key_len); in ktls_create_session()
806 if (en->iv_len != 0) { in ktls_create_session()
807 tls->params.iv_len = en->iv_len; in ktls_create_session()
808 bcopy(en->iv, tls->params.iv, en->iv_len); in ktls_create_session()
811 * For TLS 1.2 with GCM, generate an 8-byte nonce as a in ktls_create_session()
815 * array so that it is 8-byte aligned. in ktls_create_session()
817 if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16 && in ktls_create_session()
818 en->tls_vminor == TLS_MINOR_VER_TWO) in ktls_create_session()
819 arc4rand(tls->params.iv + 8, sizeof(uint64_t), 0); in ktls_create_session()
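The fragment above seeds the TLS 1.2 GCM explicit-nonce counter into the last 8 bytes of params.iv so that it sits 8-byte aligned. A userland sketch of the implied IV layout (not kernel code; arc4random_buf() stands in for arc4rand() and the structure is an assumption for illustration):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct params {
        uint8_t iv[16];     /* 4-byte implicit salt + aligned counter */
    };

    static void
    init_tls12_gcm_iv(struct params *p, const uint8_t salt[4])
    {
        memset(p->iv, 0, sizeof(p->iv));
        memcpy(p->iv, salt, 4);           /* implicit IV from key block */
        /* Random 8-byte counter at offset 8, so it is 8-byte aligned. */
        arc4random_buf(p->iv + 8, sizeof(uint64_t));
    }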
822 tls->gen = 0; in ktls_create_session()
836 refcount_init(&tls_new->refcount, 1); in ktls_clone_session()
838 TASK_INIT(&tls_new->reset_tag_task, 0, ktls_reset_receive_tag, in ktls_clone_session()
841 TASK_INIT(&tls_new->reset_tag_task, 0, ktls_reset_send_tag, in ktls_clone_session()
843 tls_new->inp = tls->inp; in ktls_clone_session()
844 tls_new->tx = true; in ktls_clone_session()
845 in_pcbref(tls_new->inp); in ktls_clone_session()
849 tls_new->params = tls->params; in ktls_clone_session()
850 tls_new->wq_index = tls->wq_index; in ktls_clone_session()
853 if (tls_new->params.auth_key != NULL) { in ktls_clone_session()
854 tls_new->params.auth_key = malloc(tls->params.auth_key_len, in ktls_clone_session()
856 memcpy(tls_new->params.auth_key, tls->params.auth_key, in ktls_clone_session()
857 tls->params.auth_key_len); in ktls_clone_session()
860 tls_new->params.cipher_key = malloc(tls->params.cipher_key_len, M_KTLS, in ktls_clone_session()
862 memcpy(tls_new->params.cipher_key, tls->params.cipher_key, in ktls_clone_session()
863 tls->params.cipher_key_len); in ktls_clone_session()
865 tls_new->gen = 0; in ktls_clone_session()
877 inp = so->so_pcb; in ktls_try_toe()
879 if (inp->inp_flags & INP_DROPPED) { in ktls_try_toe()
883 if (inp->inp_socket == NULL) { in ktls_try_toe()
888 if (!(tp->t_flags & TF_TOE)) { in ktls_try_toe()
896 tls->mode = TCP_TLS_MODE_TOE; in ktls_try_toe()
897 switch (tls->params.cipher_algorithm) { in ktls_try_toe()
930 if (inp->inp_flags & INP_DROPPED) { in ktls_alloc_snd_tag()
934 if (inp->inp_socket == NULL) { in ktls_alloc_snd_tag()
944 * - Always permit 'force' requests. in ktls_alloc_snd_tag()
945 * - ktls_ifnet_permitted == 0: always deny. in ktls_alloc_snd_tag()
959 nh = inp->inp_route.ro_nh; in ktls_alloc_snd_tag()
964 ifp = nh->nh_ifp; in ktls_alloc_snd_tag()
971 if (tp->t_pacing_rate != -1 && in ktls_alloc_snd_tag()
976 params.tls_rate_limit.max_rate = tp->t_pacing_rate; in ktls_alloc_snd_tag()
982 params.hdr.flowid = inp->inp_flowid; in ktls_alloc_snd_tag()
983 params.hdr.flowtype = inp->inp_flowtype; in ktls_alloc_snd_tag()
984 params.hdr.numa_domain = inp->inp_numa_domain; in ktls_alloc_snd_tag()
991 if (inp->inp_vflag & INP_IPV6) { in ktls_alloc_snd_tag()
1030 if (inp->inp_flags & INP_DROPPED) { in ktls_alloc_rcv_tag()
1034 if (inp->inp_socket == NULL) { in ktls_alloc_rcv_tag()
1052 nh = inp->inp_route.ro_nh; in ktls_alloc_rcv_tag()
1057 ifp = nh->nh_ifp; in ktls_alloc_rcv_tag()
1059 tls->rx_ifp = ifp; in ktls_alloc_rcv_tag()
1062 params.hdr.flowid = inp->inp_flowid; in ktls_alloc_rcv_tag()
1063 params.hdr.flowtype = inp->inp_flowtype; in ktls_alloc_rcv_tag()
1064 params.hdr.numa_domain = inp->inp_numa_domain; in ktls_alloc_rcv_tag()
1071 if (inp->inp_vflag & INP_IPV6) { in ktls_alloc_rcv_tag()
1092 tls->rx_vlan_id = params.tls_rx.vlan_id; in ktls_alloc_rcv_tag()
1106 error = ktls_alloc_snd_tag(so->so_pcb, tls, force, &mst); in ktls_try_ifnet()
1112 error = ktls_alloc_rcv_tag(so->so_pcb, tls, &mst); in ktls_try_ifnet()
1120 tls->mode = TCP_TLS_MODE_IFNET; in ktls_try_ifnet()
1121 tls->snd_tag = mst; in ktls_try_ifnet()
1123 switch (tls->params.cipher_algorithm) { in ktls_try_ifnet()
1143 tls->mode = TCP_TLS_MODE_SW; in ktls_use_sw()
1144 switch (tls->params.cipher_algorithm) { in ktls_use_sw()
1181 * To manage not-yet-decrypted data for KTLS RX, the following scheme
1184 * - A single chain of NOTREADY mbufs is hung off of sb_mtls.
1186 * - ktls_check_rx checks this chain of mbufs reading the TLS header
1190 * - The worker thread calls ktls_decrypt to decrypt TLS records in
1202 m = sb->sb_mb; in sb_mark_notready()
1203 sb->sb_mtls = m; in sb_mark_notready()
1204 sb->sb_mb = NULL; in sb_mark_notready()
1205 sb->sb_mbtail = NULL; in sb_mark_notready()
1206 sb->sb_lastrecord = NULL; in sb_mark_notready()
1207 for (; m != NULL; m = m->m_next) { in sb_mark_notready()
1208 KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt != NULL", in sb_mark_notready()
1210 KASSERT((m->m_flags & M_NOTAVAIL) == 0, ("%s: mbuf not avail", in sb_mark_notready()
1212 KASSERT(sb->sb_acc >= m->m_len, ("%s: sb_acc < m->m_len", in sb_mark_notready()
1214 m->m_flags |= M_NOTREADY; in sb_mark_notready()
1215 sb->sb_acc -= m->m_len; in sb_mark_notready()
1216 sb->sb_tlscc += m->m_len; in sb_mark_notready()
1217 sb->sb_mtlstail = m; in sb_mark_notready()
1219 KASSERT(sb->sb_acc == 0 && sb->sb_tlscc == sb->sb_ccc, in sb_mark_notready()
1220 ("%s: acc %u tlscc %u ccc %u", __func__, sb->sb_acc, sb->sb_tlscc, in sb_mark_notready()
1221 sb->sb_ccc)); in sb_mark_notready()
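sb_mark_notready() above shifts every buffered byte from the ready counter (sb_acc) to the not-yet-decrypted counter (sb_tlscc) while the total claimed count (sb_ccc) stays constant, which the closing KASSERT checks. A toy sketch of that accounting invariant, assuming the segments cover every ready byte:

    #include <assert.h>
    #include <stddef.h>

    struct sockbuf_counts {
        size_t acc;     /* ready bytes */
        size_t tlscc;   /* buffered, not yet decrypted */
        size_t ccc;     /* total claimed bytes */
    };

    static void
    mark_notready(struct sockbuf_counts *sb, const size_t *seglen, int n)
    {
        for (int i = 0; i < n; i++) {
            sb->acc -= seglen[i];
            sb->tlscc += seglen[i];
        }
        /* Same invariant as the KASSERT above, assuming the segments
         * accounted for every previously-ready byte. */
        assert(sb->acc == 0 && sb->tlscc == sb->ccc);
    }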
1243 MPASS(sb->sb_flags & SB_TLS_RX); in ktls_pending_rx_info()
1244 seqno = sb->sb_tls_seqno; in ktls_pending_rx_info()
1245 resid = sb->sb_tlscc; in ktls_pending_rx_info()
1246 m = sb->sb_mtls; in ktls_pending_rx_info()
1260 *residp = sizeof(hdr) - resid; in ktls_pending_rx_info()
1269 *residp = record_len - resid; in ktls_pending_rx_info()
1272 resid -= record_len; in ktls_pending_rx_info()
1275 if (m->m_len - offset > record_len) { in ktls_pending_rx_info()
1280 record_len -= (m->m_len - offset); in ktls_pending_rx_info()
1282 m = m->m_next; in ktls_pending_rx_info()
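ktls_pending_rx_info() above walks complete records off the buffered chain and reports how many bytes are still missing from the record in progress: the remainder of the 5-byte header if even that is incomplete, otherwise the remainder of the record. The same residual arithmetic as a standalone helper (a sketch, not the kernel function):

    #include <stddef.h>

    #define TLS_HDR_LEN 5

    /* buffered: record bytes already queued; record_len: full record
     * length from the TLS header (meaningful once the header is in). */
    static size_t
    bytes_missing(size_t buffered, size_t record_len)
    {
        if (buffered < TLS_HDR_LEN)
            return (TLS_HDR_LEN - buffered);
        return (record_len - buffered);
    }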
1288 ktls_enable_rx(struct socket *so, struct tls_enable *en) in ktls_enable_rx() argument
1302 if (so->so_proto->pr_protocol != IPPROTO_TCP) in ktls_enable_rx()
1309 if (so->so_rcv.sb_tls_info != NULL) in ktls_enable_rx()
1312 if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable) in ktls_enable_rx()
1315 error = ktls_create_session(so, en, &tls, KTLS_RX); in ktls_enable_rx()
1337 if (__predict_false(so->so_rcv.sb_tls_info != NULL)) in ktls_enable_rx()
1339 else if ((so->so_rcv.sb_flags & SB_SPLICED) != 0) in ktls_enable_rx()
1347 so->so_rcv.sb_tls_seqno = be64dec(en->rec_seq); in ktls_enable_rx()
1348 so->so_rcv.sb_tls_info = tls; in ktls_enable_rx()
1349 so->so_rcv.sb_flags |= SB_TLS_RX; in ktls_enable_rx()
1352 sb_mark_notready(&so->so_rcv); in ktls_enable_rx()
1353 ktls_check_rx(&so->so_rcv); in ktls_enable_rx()
1357 /* Prefer TOE -> ifnet TLS -> software TLS. */ in ktls_enable_rx()
1372 ktls_enable_tx(struct socket *so, struct tls_enable *en) in ktls_enable_tx() argument
1388 if (so->so_proto->pr_protocol != IPPROTO_TCP) in ktls_enable_tx()
1395 if (so->so_snd.sb_tls_info != NULL) in ktls_enable_tx()
1398 if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable) in ktls_enable_tx()
1405 error = ktls_create_session(so, en, &tls, KTLS_TX); in ktls_enable_tx()
1409 /* Prefer TOE -> ifnet TLS -> software TLS. */ in ktls_enable_tx()
1438 inp = so->so_pcb; in ktls_enable_tx()
1441 if (__predict_false(so->so_snd.sb_tls_info != NULL)) in ktls_enable_tx()
1443 else if ((so->so_snd.sb_flags & SB_SPLICED) != 0) in ktls_enable_tx()
1452 so->so_snd.sb_tls_seqno = be64dec(en->rec_seq); in ktls_enable_tx()
1453 so->so_snd.sb_tls_info = tls; in ktls_enable_tx()
1454 if (tls->mode != TCP_TLS_MODE_SW) { in ktls_enable_tx()
1456 MPASS(tp->t_nic_ktls_xmit == 0); in ktls_enable_tx()
1457 tp->t_nic_ktls_xmit = 1; in ktls_enable_tx()
1458 if (tp->t_fb->tfb_hwtls_change != NULL) in ktls_enable_tx()
1459 (*tp->t_fb->tfb_hwtls_change)(tp, 1); in ktls_enable_tx()
1478 inp = so->so_pcb; in ktls_get_rx_mode()
1481 tls = so->so_rcv.sb_tls_info; in ktls_get_rx_mode()
1485 *modep = tls->mode; in ktls_get_rx_mode()
1491 * ktls_get_rx_sequence - get the next TCP- and TLS- sequence number.
1493 * This function gets information about the next TCP- and TLS-
1500 * This function returns zero on success, else a non-zero error code
1510 so = inp->inp_socket; in ktls_get_rx_sequence()
1515 if (inp->inp_flags & INP_DROPPED) { in ktls_get_rx_sequence()
1523 SOCKBUF_LOCK(&so->so_rcv); in ktls_get_rx_sequence()
1524 *tcpseq = tp->rcv_nxt - so->so_rcv.sb_tlscc; in ktls_get_rx_sequence()
1525 *tlsseq = so->so_rcv.sb_tls_seqno; in ktls_get_rx_sequence()
1526 SOCKBUF_UNLOCK(&so->so_rcv); in ktls_get_rx_sequence()
1541 inp = so->so_pcb; in ktls_get_tx_mode()
1544 tls = so->so_snd.sb_tls_info; in ktls_get_tx_mode()
1548 *modep = tls->mode; in ktls_get_tx_mode()
1574 inp = so->so_pcb; in ktls_set_tx_mode()
1580 if (tp->t_nic_ktls_xmit) in ktls_set_tx_mode()
1587 if (tp->t_nic_ktls_xmit_dis) in ktls_set_tx_mode()
1591 SOCKBUF_LOCK(&so->so_snd); in ktls_set_tx_mode()
1592 tls = so->so_snd.sb_tls_info; in ktls_set_tx_mode()
1594 SOCKBUF_UNLOCK(&so->so_snd); in ktls_set_tx_mode()
1598 if (tls->mode == mode) { in ktls_set_tx_mode()
1599 SOCKBUF_UNLOCK(&so->so_snd); in ktls_set_tx_mode()
1604 SOCKBUF_UNLOCK(&so->so_snd); in ktls_set_tx_mode()
1634 if (tls != so->so_snd.sb_tls_info) { in ktls_set_tx_mode()
1644 SOCKBUF_LOCK(&so->so_snd); in ktls_set_tx_mode()
1645 so->so_snd.sb_tls_info = tls_new; in ktls_set_tx_mode()
1646 if (tls_new->mode != TCP_TLS_MODE_SW) { in ktls_set_tx_mode()
1647 MPASS(tp->t_nic_ktls_xmit == 0); in ktls_set_tx_mode()
1648 tp->t_nic_ktls_xmit = 1; in ktls_set_tx_mode()
1649 if (tp->t_fb->tfb_hwtls_change != NULL) in ktls_set_tx_mode()
1650 (*tp->t_fb->tfb_hwtls_change)(tp, 1); in ktls_set_tx_mode()
1652 SOCKBUF_UNLOCK(&so->so_snd); in ktls_set_tx_mode()
1660 KASSERT(tls->refcount >= 2, ("too few references on old session")); in ktls_set_tx_mode()
1692 so = tls->so; in ktls_reset_receive_tag()
1693 inp = so->so_pcb; in ktls_reset_receive_tag()
1697 if (inp->inp_flags & INP_DROPPED) { in ktls_reset_receive_tag()
1702 SOCKBUF_LOCK(&so->so_rcv); in ktls_reset_receive_tag()
1703 mst = tls->snd_tag; in ktls_reset_receive_tag()
1704 tls->snd_tag = NULL; in ktls_reset_receive_tag()
1708 ifp = tls->rx_ifp; in ktls_reset_receive_tag()
1710 SOCKBUF_UNLOCK(&so->so_rcv); in ktls_reset_receive_tag()
1713 params.hdr.flowid = inp->inp_flowid; in ktls_reset_receive_tag()
1714 params.hdr.flowtype = inp->inp_flowtype; in ktls_reset_receive_tag()
1715 params.hdr.numa_domain = inp->inp_numa_domain; in ktls_reset_receive_tag()
1718 params.tls_rx.vlan_id = tls->rx_vlan_id; in ktls_reset_receive_tag()
1721 if (inp->inp_vflag & INP_IPV6) { in ktls_reset_receive_tag()
1731 SOCKBUF_LOCK(&so->so_rcv); in ktls_reset_receive_tag()
1732 tls->snd_tag = mst; in ktls_reset_receive_tag()
1733 SOCKBUF_UNLOCK(&so->so_rcv); in ktls_reset_receive_tag()
1748 tls->reset_pending = false; in ktls_reset_receive_tag()
1753 CURVNET_SET(so->so_vnet); in ktls_reset_receive_tag()
1780 inp = tls->inp; in ktls_reset_send_tag()
1788 * Write-lock the INP when changing tls->snd_tag since in ktls_reset_send_tag()
1789 * ip[6]_output_send() holds a read-lock when reading the in ktls_reset_send_tag()
1793 old = tls->snd_tag; in ktls_reset_send_tag()
1794 tls->snd_tag = NULL; in ktls_reset_send_tag()
1803 tls->snd_tag = new; in ktls_reset_send_tag()
1805 tls->reset_pending = false; in ktls_reset_send_tag()
1818 if (!(inp->inp_flags & INP_DROPPED)) { in ktls_reset_send_tag()
1820 CURVNET_SET(inp->inp_vnet); in ktls_reset_send_tag()
1849 KASSERT(sb->sb_flags & SB_TLS_RX, ("%s: sockbuf %p isn't TLS RX", in ktls_input_ifp_mismatch()
1853 tls = sb->sb_tls_info; in ktls_input_ifp_mismatch()
1854 if_rele(tls->rx_ifp); in ktls_input_ifp_mismatch()
1856 tls->rx_ifp = ifp; in ktls_input_ifp_mismatch()
1863 if (!tls->reset_pending) { in ktls_input_ifp_mismatch()
1866 tls->so = so; in ktls_input_ifp_mismatch()
1867 tls->reset_pending = true; in ktls_input_ifp_mismatch()
1868 taskqueue_enqueue(taskqueue_thread, &tls->reset_tag_task); in ktls_input_ifp_mismatch()
1887 if (!tls->reset_pending) { in ktls_output_eagain()
1889 tls->reset_pending = true; in ktls_output_eagain()
1890 taskqueue_enqueue(taskqueue_thread, &tls->reset_tag_task); in ktls_output_eagain()
1909 MPASS(tls->mode == TCP_TLS_MODE_IFNET); in ktls_modify_txrtlmt()
1911 if (tls->snd_tag == NULL) { in ktls_modify_txrtlmt()
1921 mst = tls->snd_tag; in ktls_modify_txrtlmt()
1924 MPASS(mst->sw->type == IF_SND_TAG_TYPE_TLS_RATE_LIMIT); in ktls_modify_txrtlmt()
1926 return (mst->sw->snd_tag_modify(mst, &params)); in ktls_modify_txrtlmt()

1943 MPASS(tls->refcount == 0); in ktls_destroy()
1945 inp = tls->inp; in ktls_destroy()
1946 if (tls->tx) { in ktls_destroy()
1958 if (curthread->td_rw_rlocks == 0) { in ktls_destroy()
1969 TASK_INIT(&tls->destroy_task, 0, in ktls_destroy()
1972 &tls->destroy_task); in ktls_destroy()
1978 if (tls->sequential_records) { in ktls_destroy()
1982 STAILQ_FOREACH_SAFE(m, &tls->pending_records, m_epg_stailq, n) { in ktls_destroy()
1983 page_count = m->m_epg_enc_cnt; in ktls_destroy()
1985 KASSERT(page_count >= m->m_epg_nrdy, in ktls_destroy()
1987 page_count -= m->m_epg_nrdy; in ktls_destroy()
1993 counter_u64_add(ktls_offload_active, -1); in ktls_destroy()
1994 switch (tls->mode) { in ktls_destroy()
1996 switch (tls->params.cipher_algorithm) { in ktls_destroy()
1998 counter_u64_add(ktls_sw_cbc, -1); in ktls_destroy()
2001 counter_u64_add(ktls_sw_gcm, -1); in ktls_destroy()
2004 counter_u64_add(ktls_sw_chacha20, -1); in ktls_destroy()
2009 switch (tls->params.cipher_algorithm) { in ktls_destroy()
2011 counter_u64_add(ktls_ifnet_cbc, -1); in ktls_destroy()
2014 counter_u64_add(ktls_ifnet_gcm, -1); in ktls_destroy()
2017 counter_u64_add(ktls_ifnet_chacha20, -1); in ktls_destroy()
2020 if (tls->snd_tag != NULL) in ktls_destroy()
2021 m_snd_tag_rele(tls->snd_tag); in ktls_destroy()
2022 if (tls->rx_ifp != NULL) in ktls_destroy()
2023 if_rele(tls->rx_ifp); in ktls_destroy()
2024 if (tls->tx) { in ktls_destroy()
2027 MPASS(tp->t_nic_ktls_xmit == 1); in ktls_destroy()
2028 tp->t_nic_ktls_xmit = 0; in ktls_destroy()
2033 switch (tls->params.cipher_algorithm) { in ktls_destroy()
2035 counter_u64_add(ktls_toe_cbc, -1); in ktls_destroy()
2038 counter_u64_add(ktls_toe_gcm, -1); in ktls_destroy()
2041 counter_u64_add(ktls_toe_chacha20, -1); in ktls_destroy()
2047 if (tls->ocf_session != NULL) in ktls_destroy()
2049 if (tls->params.auth_key != NULL) { in ktls_destroy()
2050 zfree(tls->params.auth_key, M_KTLS); in ktls_destroy()
2051 tls->params.auth_key = NULL; in ktls_destroy()
2052 tls->params.auth_key_len = 0; in ktls_destroy()
2054 if (tls->params.cipher_key != NULL) { in ktls_destroy()
2055 zfree(tls->params.cipher_key, M_KTLS); in ktls_destroy()
2056 tls->params.cipher_key = NULL; in ktls_destroy()
2057 tls->params.cipher_key_len = 0; in ktls_destroy()
2059 if (tls->tx) { in ktls_destroy()
2064 explicit_bzero(tls->params.iv, sizeof(tls->params.iv)); in ktls_destroy()
2073 for (; m != NULL; m = m->m_next) { in ktls_seq()
2074 KASSERT((m->m_flags & M_EXTPG) != 0, in ktls_seq()
2077 m->m_epg_seqno = sb->sb_tls_seqno; in ktls_seq()
2078 sb->sb_tls_seqno++; in ktls_seq()
2107 maxlen = tls->params.max_frame_len; in ktls_frame()
2109 for (m = top; m != NULL; m = m->m_next) { in ktls_frame()
2116 KASSERT(m->m_len <= maxlen && m->m_len >= 0 && in ktls_frame()
2117 (m->m_len > 0 || ktls_permit_empty_frames(tls)), in ktls_frame()
2118 ("ktls_frame: m %p len %d", m, m->m_len)); in ktls_frame()
2124 KASSERT((m->m_flags & M_EXTPG) != 0, in ktls_frame()
2127 tls_len = m->m_len; in ktls_frame()
2130 m->m_epg_tls = ktls_hold(tls); in ktls_frame()
2132 m->m_epg_hdrlen = tls->params.tls_hlen; in ktls_frame()
2133 m->m_epg_trllen = tls->params.tls_tlen; in ktls_frame()
2134 if (tls->params.cipher_algorithm == CRYPTO_AES_CBC) { in ktls_frame()
2138 * AES-CBC pads messages to a multiple of the in ktls_frame()
2142 * At least one byte of padding is always in ktls_frame()
2147 * tls->params.tls_tlen is the maximum in ktls_frame()
2153 bs = tls->params.tls_bs; in ktls_frame()
2154 delta = (tls_len + tls->params.tls_tlen) & (bs - 1); in ktls_frame()
2155 m->m_epg_trllen -= delta; in ktls_frame()
2157 m->m_len += m->m_epg_hdrlen + m->m_epg_trllen; in ktls_frame()
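The AES-CBC branch above sizes tls_tlen for the worst case (MAC plus a full padding block, since at least one pad byte is mandatory) and then trims each record's trailer by delta so that MAC plus padding rounds the record up to a block-size multiple. A worked example of the arithmetic, assuming a 32-byte HMAC-SHA256 MAC:

    #include <stdio.h>

    #define AES_BLOCK_LEN 16
    #define MAC_LEN       32    /* assumed: HMAC-SHA256 */

    int
    main(void)
    {
        int tls_tlen = MAC_LEN + AES_BLOCK_LEN;  /* worst-case trailer */
        int tls_len;

        for (tls_len = 99; tls_len <= 101; tls_len++) {
            int delta = (tls_len + tls_tlen) & (AES_BLOCK_LEN - 1);
            int trllen = tls_tlen - delta;

            /* Each total is a multiple of AES_BLOCK_LEN with 1..16
             * bytes of padding after the MAC. */
            printf("payload %d: trailer %d, padded total %d\n",
                tls_len, trllen, tls_len + trllen);
        }
        return (0);
    }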
2160 tlshdr = (void *)m->m_epg_hdr; in ktls_frame()
2161 tlshdr->tls_vmajor = tls->params.tls_vmajor; in ktls_frame()
2167 if (tls->params.tls_vminor == TLS_MINOR_VER_THREE && in ktls_frame()
2168 tls->params.tls_vmajor == TLS_MAJOR_VER_ONE) { in ktls_frame()
2169 tlshdr->tls_vminor = TLS_MINOR_VER_TWO; in ktls_frame()
2170 tlshdr->tls_type = TLS_RLTYPE_APP; in ktls_frame()
2172 m->m_epg_record_type = record_type; in ktls_frame()
2173 m->m_epg_trail[0] = record_type; in ktls_frame()
2175 tlshdr->tls_vminor = tls->params.tls_vminor; in ktls_frame()
2176 tlshdr->tls_type = record_type; in ktls_frame()
2178 tlshdr->tls_length = htons(m->m_len - sizeof(*tlshdr)); in ktls_frame()
2184 * For GCM with TLS 1.2, an 8 byte nonce is copied in ktls_frame()
2190 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 && in ktls_frame()
2191 tls->params.tls_vminor == TLS_MINOR_VER_TWO) { in ktls_frame()
2192 noncep = (uint64_t *)(tls->params.iv + 8); in ktls_frame()
2195 } else if (tls->params.cipher_algorithm == CRYPTO_AES_CBC && in ktls_frame()
2196 tls->params.tls_vminor >= TLS_MINOR_VER_ONE) in ktls_frame()
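The fragments above handle the per-record explicit material: for TLS 1.2 GCM, the 8-byte counter kept at params.iv + 8 is written into the record header and then bumped, while TLS 1.1+ CBC instead uses an explicit per-record IV. A sketch of the GCM side (the header layout is an assumption for illustration):

    #include <stdint.h>
    #include <string.h>

    /* hdr points at the 8 explicit-nonce bytes that follow the 5-byte
     * record header; ivp at the counter slot at params.iv + 8. */
    static void
    fill_tls12_gcm_nonce(uint8_t *hdr, uint8_t *ivp)
    {
        uint64_t nonce;

        memcpy(&nonce, ivp, sizeof(nonce));
        memcpy(hdr, &nonce, sizeof(nonce)); /* goes out on the wire */
        nonce++;                            /* unique for the next record */
        memcpy(ivp, &nonce, sizeof(nonce));
    }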
2207 if (tls->mode == TCP_TLS_MODE_SW) { in ktls_frame()
2208 m->m_flags |= M_NOTREADY; in ktls_frame()
2211 m->m_epg_nrdy = 1; in ktls_frame()
2213 m->m_epg_nrdy = m->m_epg_npgs; in ktls_frame()
2214 *enq_cnt += m->m_epg_nrdy; in ktls_frame()
2222 return (tls->params.cipher_algorithm == CRYPTO_AES_CBC && in ktls_permit_empty_frames()
2223 tls->params.tls_vminor == TLS_MINOR_VER_ZERO); in ktls_permit_empty_frames()
2235 KASSERT(sb->sb_flags & SB_TLS_RX, ("%s: sockbuf %p isn't TLS RX", in ktls_check_rx()
2239 if (sb->sb_flags & SB_TLS_RX_RUNNING) in ktls_check_rx()
2243 if (sb->sb_tlscc < sizeof(hdr)) { in ktls_check_rx()
2244 if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc != 0) in ktls_check_rx()
2245 so->so_error = EMSGSIZE; in ktls_check_rx()
2249 m_copydata(sb->sb_mtls, 0, sizeof(hdr), (void *)&hdr); in ktls_check_rx()
2252 if (sb->sb_tlscc < sizeof(hdr) + ntohs(hdr.tls_length)) { in ktls_check_rx()
2253 if ((sb->sb_state & SBS_CANTRCVMORE) != 0) in ktls_check_rx()
2254 so->so_error = EMSGSIZE; in ktls_check_rx()
2258 sb->sb_flags |= SB_TLS_RX_RUNNING; in ktls_check_rx()
2261 wq = &ktls_wq[so->so_rcv.sb_tls_info->wq_index]; in ktls_check_rx()
2262 mtx_lock(&wq->mtx); in ktls_check_rx()
2263 STAILQ_INSERT_TAIL(&wq->so_head, so, so_ktls_rx_list); in ktls_check_rx()
2264 running = wq->running; in ktls_check_rx()
2265 mtx_unlock(&wq->mtx); in ktls_check_rx()
2278 MPASS(len <= sb->sb_tlscc); in ktls_detach_record()
2284 top = sb->sb_mtls; in ktls_detach_record()
2285 if (sb->sb_tlscc == len) { in ktls_detach_record()
2286 sb->sb_mtls = NULL; in ktls_detach_record()
2287 sb->sb_mtlstail = NULL; in ktls_detach_record()
2297 for (m = top; remain > m->m_len; m = m->m_next) in ktls_detach_record()
2298 remain -= m->m_len; in ktls_detach_record()
2301 if (remain == m->m_len) { in ktls_detach_record()
2302 sb->sb_mtls = m->m_next; in ktls_detach_record()
2303 if (sb->sb_mtls == NULL) in ktls_detach_record()
2304 sb->sb_mtlstail = NULL; in ktls_detach_record()
2305 m->m_next = NULL; in ktls_detach_record()
2325 if (sb->sb_mtls != top) { in ktls_detach_record()
2330 n->m_flags |= (m->m_flags & (M_NOTREADY | M_DECRYPTED)); in ktls_detach_record()
2333 n->m_len = m->m_len - remain; in ktls_detach_record()
2334 if (m->m_flags & M_EXT) { in ktls_detach_record()
2335 n->m_data = m->m_data + remain; in ktls_detach_record()
2338 bcopy(mtod(m, caddr_t) + remain, mtod(n, caddr_t), n->m_len); in ktls_detach_record()
2342 m->m_len -= n->m_len; in ktls_detach_record()
2343 sb->sb_tlscc -= n->m_len; in ktls_detach_record()
2344 sb->sb_ccc -= n->m_len; in ktls_detach_record()
2350 sb->sb_mtls = n; in ktls_detach_record()
2351 n->m_next = m->m_next; in ktls_detach_record()
2352 if (sb->sb_mtlstail == m) in ktls_detach_record()
2353 sb->sb_mtlstail = n; in ktls_detach_record()
2356 m->m_next = NULL; in ktls_detach_record()
2360 for (m = top; m != NULL; m = m->m_next) in ktls_detach_record()
2362 sb->sb_tlsdcc = len; in ktls_detach_record()
2363 sb->sb_ccc += len; in ktls_detach_record()
2370 * record type in the byte before the padding.
2373 * be to scan forwards remembering the last non-zero byte before the
2375 * Instead, find the last non-zero byte of each mbuf in the chain
2376 * keeping track of the relative offset of that nonzero byte.
2390 digest_start = tls_len - *trailer_len; in tls13_find_record_type()
2394 offset += m->m_len, m = m->m_next) { in tls13_find_record_type()
2396 m_len = min(digest_start - offset, m->m_len); in tls13_find_record_type()
2399 /* Find last non-zero byte in this mbuf. */ in tls13_find_record_type()
2400 while (m_len > 0 && cp[m_len - 1] == 0) in tls13_find_record_type()
2401 m_len--; in tls13_find_record_type()
2403 record_type = cp[m_len - 1]; in tls13_find_record_type()
2407 if (last_offset < tls->params.tls_hlen) in tls13_find_record_type()
2411 *trailer_len = tls_len - last_offset + 1; in tls13_find_record_type()
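tls13_find_record_type() above recovers the inner record type, which TLS 1.3 hides in the last non-zero byte before the trailing zero padding; since mbuf chains cannot be walked backwards, it remembers the last non-zero byte of each mbuf and its relative offset in a single forward pass. A flat-buffer sketch of the same scan over segments (not the kernel function):

    #include <stddef.h>
    #include <stdint.h>

    struct seg {
        const uint8_t *p;
        size_t len;
    };

    /* Returns the inner record type, or -1 if the plaintext is all
     * padding (malformed).  *offp gets the type byte's absolute
     * offset when one is found. */
    static int
    find_record_type(const struct seg *segs, int nsegs, size_t *offp)
    {
        size_t base = 0;
        int type = -1;

        for (int i = 0; i < nsegs; i++) {
            size_t n = segs[i].len;

            /* Last non-zero byte within this segment, if any. */
            while (n > 0 && segs[i].p[n - 1] == 0)
                n--;
            if (n > 0) {
                type = segs[i].p[n - 1];
                *offp = base + n - 1;
            }
            base += segs[i].len;
        }
        return (type);
    }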
2426 int m_flags_anded = -1; in ktls_mbuf_crypto_state()
2428 for (; mb != NULL; mb = mb->m_next) { in ktls_mbuf_crypto_state()
2429 if (offset < mb->m_len) in ktls_mbuf_crypto_state()
2431 offset -= mb->m_len; in ktls_mbuf_crypto_state()
2435 for (; mb != NULL; mb = mb->m_next) { in ktls_mbuf_crypto_state()
2436 m_flags_ored |= mb->m_flags; in ktls_mbuf_crypto_state()
2437 m_flags_anded &= mb->m_flags; in ktls_mbuf_crypto_state()
2439 if (offset <= mb->m_len) in ktls_mbuf_crypto_state()
2441 offset -= mb->m_len; in ktls_mbuf_crypto_state()
2454 * ktls_resync_ifnet - get HW TLS RX back on track after packet loss
2464 mst = so->so_rcv.sb_tls_info->snd_tag; in ktls_resync_ifnet()
2473 if (inp->inp_flags & INP_DROPPED) { in ktls_resync_ifnet()
2482 SOCKBUF_LOCK(&so->so_rcv); in ktls_resync_ifnet()
2484 tp->rcv_nxt - so->so_rcv.sb_tlscc - tls_len; in ktls_resync_ifnet()
2487 SOCKBUF_UNLOCK(&so->so_rcv); in ktls_resync_ifnet()
2491 MPASS(mst->sw->type == IF_SND_TAG_TYPE_TLS_RX); in ktls_resync_ifnet()
2492 return (mst->sw->snd_tag_modify(mst, &params)); in ktls_resync_ifnet()
2504 if (!(inp->inp_flags & INP_DROPPED)) { in ktls_drop()
2506 CURVNET_SET(inp->inp_vnet); in ktls_drop()
2512 so->so_error = error; in ktls_drop()
2536 sb = &so->so_rcv; in ktls_decrypt()
2538 KASSERT(sb->sb_flags & SB_TLS_RX_RUNNING, in ktls_decrypt()
2541 tls = sb->sb_tls_info; in ktls_decrypt()
2544 tls13 = (tls->params.tls_vminor == TLS_MINOR_VER_THREE); in ktls_decrypt()
2548 vminor = tls->params.tls_vminor; in ktls_decrypt()
2551 if (sb->sb_tlscc < tls->params.tls_hlen) in ktls_decrypt()
2554 m_copydata(sb->sb_mtls, 0, tls->params.tls_hlen, tls_header); in ktls_decrypt()
2555 tls_len = sizeof(*hdr) + ntohs(hdr->tls_length); in ktls_decrypt()
2557 if (hdr->tls_vmajor != tls->params.tls_vmajor || in ktls_decrypt()
2558 hdr->tls_vminor != vminor) in ktls_decrypt()
2560 else if (tls13 && hdr->tls_type != TLS_RLTYPE_APP) in ktls_decrypt()
2562 else if (tls_len < tls->params.tls_hlen || tls_len > in ktls_decrypt()
2563 tls->params.tls_hlen + TLS_MAX_MSG_SIZE_V10_2 + in ktls_decrypt()
2564 tls->params.tls_tlen) in ktls_decrypt()
2582 if (sb->sb_tlscc < tls_len) in ktls_decrypt()
2592 MPASS(sb->sb_tlsdcc == tls_len); in ktls_decrypt()
2594 seqno = sb->sb_tls_seqno; in ktls_decrypt()
2595 sb->sb_tls_seqno++; in ktls_decrypt()
2616 record_type = hdr->tls_type; in ktls_decrypt()
2627 trail_len = tls->params.tls_tlen - 1; in ktls_decrypt()
2631 trail_len = tls->params.tls_tlen; in ktls_decrypt()
2633 record_type = hdr->tls_type; in ktls_decrypt()
2644 if (sb->sb_tlsdcc == 0) { in ktls_decrypt()
2657 sb->sb_ccc -= tls_len; in ktls_decrypt()
2658 sb->sb_tlsdcc = 0; in ktls_decrypt()
2662 CURVNET_SET(so->so_vnet); in ktls_decrypt()
2663 so->so_error = error; in ktls_decrypt()
2676 tgr.tls_vmajor = hdr->tls_vmajor; in ktls_decrypt()
2677 tgr.tls_vminor = hdr->tls_vminor; in ktls_decrypt()
2678 tgr.tls_length = htobe16(tls_len - tls->params.tls_hlen - in ktls_decrypt()
2684 if (sb->sb_tlsdcc == 0) { in ktls_decrypt()
2686 MPASS(sb->sb_tlscc == 0); in ktls_decrypt()
2696 sb->sb_ccc -= tls_len; in ktls_decrypt()
2697 sb->sb_tlsdcc = 0; in ktls_decrypt()
2706 remain = tls->params.tls_hlen; in ktls_decrypt()
2708 if (data->m_len > remain) { in ktls_decrypt()
2709 data->m_data += remain; in ktls_decrypt()
2710 data->m_len -= remain; in ktls_decrypt()
2713 remain -= data->m_len; in ktls_decrypt()
2720 for (m = data; remain > m->m_len; m = m->m_next) { in ktls_decrypt()
2721 m->m_flags &= ~(M_NOTREADY | M_DECRYPTED); in ktls_decrypt()
2722 remain -= m->m_len; in ktls_decrypt()
2724 m->m_len = remain; in ktls_decrypt()
2725 m_freem(m->m_next); in ktls_decrypt()
2726 m->m_next = NULL; in ktls_decrypt()
2727 m->m_flags &= ~(M_NOTREADY | M_DECRYPTED); in ktls_decrypt()
2730 m->m_flags |= M_EOR; in ktls_decrypt()
2736 sb->sb_flags |= SB_TLS_RX_RESYNC; in ktls_decrypt()
2740 } else if (__predict_false(sb->sb_flags & SB_TLS_RX_RESYNC)) { in ktls_decrypt()
2741 sb->sb_flags &= ~SB_TLS_RX_RESYNC; in ktls_decrypt()
2748 sb->sb_flags &= ~SB_TLS_RX_RUNNING; in ktls_decrypt()
2750 if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc > 0) in ktls_decrypt()
2751 so->so_error = EMSGSIZE; in ktls_decrypt()
2758 CURVNET_SET(so->so_vnet); in ktls_decrypt()
2770 m->m_epg_flags |= EPG_FLAG_2FREE; in ktls_enqueue_to_free()
2771 wq = &ktls_wq[m->m_epg_tls->wq_index]; in ktls_enqueue_to_free()
2772 mtx_lock(&wq->mtx); in ktls_enqueue_to_free()
2773 STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq); in ktls_enqueue_to_free()
2774 running = wq->running; in ktls_enqueue_to_free()
2775 mtx_unlock(&wq->mtx); in ktls_enqueue_to_free()
2786 if (m->m_epg_npgs <= 2) in ktls_buffer_alloc()
2790 if ((u_int)(ticks - wq->lastallocfail) < hz) { in ktls_buffer_alloc()
2792 * Rate-limit allocation attempts after a failure. in ktls_buffer_alloc()
2793 * ktls_buffer_import() will acquire a per-domain mutex to check in ktls_buffer_alloc()
2802 wq->lastallocfail = ticks; in ktls_buffer_alloc()
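The fragment above rate-limits buffer-zone allocation attempts: after a failure, further tries are skipped until a second's worth of clock ticks has passed, avoiding repeated trips through the contended per-domain import path. The pattern in isolation (a sketch; ticks and hz are stand-ins for the kernel globals):

    #include <stdbool.h>

    static unsigned int ticks;          /* advancing tick counter */
    static const unsigned int hz = 1000;
    static unsigned int lastallocfail;

    static bool
    alloc_backoff_expired(void)
    {
        /* Unsigned subtraction stays correct across wraparound. */
        return ((unsigned int)(ticks - lastallocfail) >= hz);
    }

    static void
    note_alloc_failure(void)
    {
        lastallocfail = ticks;
    }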
2827 KASSERT((m->m_flags & (M_EXTPG | M_NOTREADY)) == (M_EXTPG | M_NOTREADY), in ktls_encrypt_record()
2829 KASSERT(ptoa(m->m_epg_npgs) <= ktls_maxlen, in ktls_encrypt_record()
2830 ("page count %d larger than maximum frame length %d", m->m_epg_npgs, in ktls_encrypt_record()
2834 if ((m->m_epg_flags & EPG_FLAG_ANON) != 0) in ktls_encrypt_record()
2838 * For file-backed mbufs (from sendfile), anonymous wired in ktls_encrypt_record()
2841 if ((state->cbuf = ktls_buffer_alloc(wq, m)) != NULL) { in ktls_encrypt_record()
2842 len = ptoa(m->m_epg_npgs - 1) + m->m_epg_last_len - in ktls_encrypt_record()
2843 m->m_epg_1st_off; in ktls_encrypt_record()
2844 state->dst_iov[0].iov_base = (char *)state->cbuf + in ktls_encrypt_record()
2845 m->m_epg_1st_off; in ktls_encrypt_record()
2846 state->dst_iov[0].iov_len = len; in ktls_encrypt_record()
2847 state->parray[0] = DMAP_TO_PHYS((vm_offset_t)state->cbuf); in ktls_encrypt_record()
2850 off = m->m_epg_1st_off; in ktls_encrypt_record()
2851 for (i = 0; i < m->m_epg_npgs; i++, off = 0) { in ktls_encrypt_record()
2855 state->parray[i] = VM_PAGE_TO_PHYS(pg); in ktls_encrypt_record()
2856 state->dst_iov[i].iov_base = in ktls_encrypt_record()
2857 (char *)PHYS_TO_DMAP(state->parray[i]) + off; in ktls_encrypt_record()
2858 state->dst_iov[i].iov_len = len; in ktls_encrypt_record()
2861 KASSERT(i + 1 <= nitems(state->dst_iov), ("dst_iov is too small")); in ktls_encrypt_record()
2862 state->dst_iov[i].iov_base = m->m_epg_trail; in ktls_encrypt_record()
2863 state->dst_iov[i].iov_len = m->m_epg_trllen; in ktls_encrypt_record()
2865 error = ktls_ocf_encrypt(state, tls, m, state->dst_iov, i + 1); in ktls_encrypt_record()
2869 if (state->cbuf != NULL) in ktls_encrypt_record()
2870 uma_zfree(ktls_buffer_zone, state->cbuf); in ktls_encrypt_record()
2872 for (i = 0; i < m->m_epg_npgs; i++) { in ktls_encrypt_record()
2873 pg = PHYS_TO_VM_PAGE(state->parray[i]); in ktls_encrypt_record()
2889 page_count = m->m_epg_enc_cnt; in ktls_batched_records()
2892 page_count -= m->m_epg_nrdy; in ktls_batched_records()
2893 m = m->m_next; in ktls_batched_records()
2907 KASSERT(((m->m_flags & (M_EXTPG | M_NOTREADY)) == in ktls_enqueue()
2912 KASSERT(m->m_epg_tls->mode == TCP_TLS_MODE_SW, ("!SW TLS mbuf")); in ktls_enqueue()
2914 m->m_epg_enc_cnt = page_count; in ktls_enqueue()
2920 m->m_epg_so = so; in ktls_enqueue()
2923 tls = m->m_epg_tls; in ktls_enqueue()
2924 wq = &ktls_wq[tls->wq_index]; in ktls_enqueue()
2925 mtx_lock(&wq->mtx); in ktls_enqueue()
2926 if (__predict_false(tls->sequential_records)) { in ktls_enqueue()
2936 * tls->next_seqno holds the sequence number of the in ktls_enqueue()
2938 * queue. If this next record is not tls->next_seqno, in ktls_enqueue()
2940 * TLS sequence number, into tls->pending_records and in ktls_enqueue()
2943 * If this TLS record matches tls->next_seqno, place in ktls_enqueue()
2945 * tls->pending_records to see if any in ktls_enqueue()
2946 * previously-queued records are now ready for in ktls_enqueue()
2949 if (m->m_epg_seqno != tls->next_seqno) { in ktls_enqueue()
2953 STAILQ_FOREACH(n, &tls->pending_records, m_epg_stailq) { in ktls_enqueue()
2954 if (n->m_epg_seqno > m->m_epg_seqno) in ktls_enqueue()
2959 STAILQ_INSERT_TAIL(&tls->pending_records, m, in ktls_enqueue()
2962 STAILQ_INSERT_HEAD(&tls->pending_records, m, in ktls_enqueue()
2965 STAILQ_INSERT_AFTER(&tls->pending_records, p, m, in ktls_enqueue()
2967 mtx_unlock(&wq->mtx); in ktls_enqueue()
2972 tls->next_seqno += ktls_batched_records(m); in ktls_enqueue()
2973 STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq); in ktls_enqueue()
2975 while (!STAILQ_EMPTY(&tls->pending_records)) { in ktls_enqueue()
2978 n = STAILQ_FIRST(&tls->pending_records); in ktls_enqueue()
2979 if (n->m_epg_seqno != tls->next_seqno) in ktls_enqueue()
2983 STAILQ_REMOVE_HEAD(&tls->pending_records, m_epg_stailq); in ktls_enqueue()
2984 tls->next_seqno += ktls_batched_records(n); in ktls_enqueue()
2985 STAILQ_INSERT_TAIL(&wq->m_head, n, m_epg_stailq); in ktls_enqueue()
2987 counter_u64_add(ktls_cnt_tx_pending, -(queued - 1)); in ktls_enqueue()
2989 STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq); in ktls_enqueue()
2991 running = wq->running; in ktls_enqueue()
2992 mtx_unlock(&wq->mtx); in ktls_enqueue()
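ktls_enqueue() above keeps AES-CBC TLS 1.0 records strictly ordered: a record whose sequence number is not next_seqno parks on a sorted pending list, and when the expected record arrives, it and any now-contiguous pending records are dispatched together. A compact toy sketch of that dispatch rule (it ignores the kernel's per-mbuf record batching and uses a small fixed buffer):

    #include <stdint.h>
    #include <stdio.h>

    #define PENDING_MAX 8

    static uint64_t next_seqno;
    static uint64_t pending[PENDING_MAX];
    static int npending;

    static void
    enqueue_record(uint64_t seqno)
    {
        if (seqno != next_seqno) {      /* out of order: park, sorted */
            int i;

            if (npending == PENDING_MAX)
                return;                 /* toy bound; kernel list grows */
            i = npending++;
            while (i > 0 && pending[i - 1] > seqno) {
                pending[i] = pending[i - 1];
                i--;
            }
            pending[i] = seqno;
            return;
        }
        printf("dispatch %llu\n", (unsigned long long)seqno);
        next_seqno++;
        /* Drain pending records that are now contiguous. */
        while (npending > 0 && pending[0] == next_seqno) {
            printf("dispatch %llu\n", (unsigned long long)pending[0]);
            next_seqno++;
            npending--;
            for (int i = 0; i < npending; i++)
                pending[i] = pending[i + 1];
        }
    }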
2999 * Once a file-backed mbuf (from sendfile) has been encrypted, free
3008 MPASS((m->m_epg_flags & EPG_FLAG_ANON) == 0); in ktls_finish_nonanon()
3011 m->m_ext.ext_free(m); in ktls_finish_nonanon()
3014 if (state->cbuf != NULL) { in ktls_finish_nonanon()
3015 for (i = 0; i < m->m_epg_npgs; i++) in ktls_finish_nonanon()
3016 m->m_epg_pa[i] = state->parray[0] + ptoa(i); in ktls_finish_nonanon()
3019 m->m_ext.ext_free = ktls_free_mext_contig; in ktls_finish_nonanon()
3021 for (i = 0; i < m->m_epg_npgs; i++) in ktls_finish_nonanon()
3022 m->m_epg_pa[i] = state->parray[i]; in ktls_finish_nonanon()
3025 m->m_ext.ext_free = mb_free_mext_pgs; in ktls_finish_nonanon()
3029 m->m_epg_flags |= EPG_FLAG_ANON; in ktls_finish_nonanon()
3041 so = top->m_epg_so; in ktls_encrypt()
3042 tls = top->m_epg_tls; in ktls_encrypt()
3046 top->m_epg_so = NULL; in ktls_encrypt()
3048 total_pages = top->m_epg_enc_cnt; in ktls_encrypt()
3069 for (m = top; npages != total_pages; m = m->m_next) { in ktls_encrypt()
3070 KASSERT(m->m_epg_tls == tls, in ktls_encrypt()
3072 tls, m->m_epg_tls)); in ktls_encrypt()
3073 KASSERT(npages + m->m_epg_npgs <= total_pages, in ktls_encrypt()
3083 if ((m->m_epg_flags & EPG_FLAG_ANON) == 0) in ktls_encrypt()
3085 m->m_flags |= M_RDONLY; in ktls_encrypt()
3087 npages += m->m_epg_nrdy; in ktls_encrypt()
3093 * yet-to-be-encrypted records having an associated in ktls_encrypt()
3096 m->m_epg_tls = NULL; in ktls_encrypt()
3100 CURVNET_SET(so->so_vnet); in ktls_encrypt()
3102 (void)so->so_proto->pr_ready(so, top, npages); in ktls_encrypt()
3120 m = state->m; in ktls_encrypt_cb()
3122 if ((m->m_epg_flags & EPG_FLAG_ANON) == 0) in ktls_encrypt_cb()
3124 m->m_flags |= M_RDONLY; in ktls_encrypt_cb()
3126 so = state->so; in ktls_encrypt_cb()
3132 * no associated session vs yet-to-be-encrypted records having in ktls_encrypt_cb()
3135 tls = m->m_epg_tls; in ktls_encrypt_cb()
3136 m->m_epg_tls = NULL; in ktls_encrypt_cb()
3142 CURVNET_SET(so->so_vnet); in ktls_encrypt_cb()
3143 npages = m->m_epg_nrdy; in ktls_encrypt_cb()
3146 (void)so->so_proto->pr_ready(so, m, npages); in ktls_encrypt_cb()
3170 so = top->m_epg_so; in ktls_encrypt_async()
3171 tls = top->m_epg_tls; in ktls_encrypt_async()
3175 top->m_epg_so = NULL; in ktls_encrypt_async()
3177 total_pages = top->m_epg_enc_cnt; in ktls_encrypt_async()
3182 KASSERT(m->m_epg_tls == tls, in ktls_encrypt_async()
3184 tls, m->m_epg_tls)); in ktls_encrypt_async()
3185 KASSERT(npages + m->m_epg_npgs <= total_pages, in ktls_encrypt_async()
3191 state->so = so; in ktls_encrypt_async()
3192 state->m = m; in ktls_encrypt_async()
3194 mpages = m->m_epg_nrdy; in ktls_encrypt_async()
3195 n = m->m_next; in ktls_encrypt_async()
3201 CURVNET_SET(so->so_vnet); in ktls_encrypt_async()
3210 CURVNET_SET(so->so_vnet); in ktls_encrypt_async()
3213 mb_free_notready(m, total_pages - npages); in ktls_encrypt_async()
3225 error = cpuset_setthread(curthread->td_tid, &cpuset_domain[domain]); in ktls_bind_domain()
3228 curthread->td_domain.dr_policy = DOMAINSET_PREF(domain); in ktls_bind_domain()
3236 struct ktls_reclaim_thread *sc = &ktls_domain->reclaim_td; in ktls_reclaim_thread()
3241 domain = ktls_domain - ktls_domains; in ktls_reclaim_thread()
3252 CTLFLAG_RD, &sc->reclaims, 0, "buffers reclaimed"); in ktls_reclaim_thread()
3254 CTLFLAG_RD, &sc->wakeups, 0, "thread wakeups"); in ktls_reclaim_thread()
3256 CTLFLAG_RD, &sc->running, 0, "thread running"); in ktls_reclaim_thread()
3259 atomic_store_int(&sc->running, 0); in ktls_reclaim_thread()
3260 tsleep(sc, PZERO | PNOLOCK, "-", 0); in ktls_reclaim_thread()
3261 atomic_store_int(&sc->running, 1); in ktls_reclaim_thread()
3262 sc->wakeups++; in ktls_reclaim_thread()
3278 sc->reclaims += ktls_max_reclaim; in ktls_reclaim_thread()
3293 cpu = wq - ktls_wq; in ktls_work_thread()
3307 error = ktls_bind_domain(pc->pc_domain); in ktls_work_thread()
3312 error = cpuset_setthread(curthread->td_tid, &mask); in ktls_work_thread()
3322 mtx_lock(&wq->mtx); in ktls_work_thread()
3323 while (STAILQ_EMPTY(&wq->m_head) && in ktls_work_thread()
3324 STAILQ_EMPTY(&wq->so_head)) { in ktls_work_thread()
3325 wq->running = false; in ktls_work_thread()
3326 mtx_sleep(wq, &wq->mtx, 0, "-", 0); in ktls_work_thread()
3327 wq->running = true; in ktls_work_thread()
3331 STAILQ_CONCAT(&local_m_head, &wq->m_head); in ktls_work_thread()
3333 STAILQ_CONCAT(&local_so_head, &wq->so_head); in ktls_work_thread()
3334 mtx_unlock(&wq->mtx); in ktls_work_thread()
3337 if (m->m_epg_flags & EPG_FLAG_2FREE) { in ktls_work_thread()
3338 ktls_free(m->m_epg_tls); in ktls_work_thread()
3341 if (m->m_epg_tls->sync_dispatch) in ktls_work_thread()
3345 counter_u64_add(ktls_cnt_tx_queued, -1); in ktls_work_thread()
3351 counter_u64_add(ktls_cnt_rx_queued, -1); in ktls_work_thread()
3366 inp = tls->inp; in ktls_disable_ifnet_help()
3370 so = inp->inp_socket; in ktls_disable_ifnet_help()
3372 if (inp->inp_flags & INP_DROPPED) { in ktls_disable_ifnet_help()
3376 if (so->so_snd.sb_tls_info != NULL) in ktls_disable_ifnet_help()
3383 if ((inp->inp_flags & INP_DROPPED) == 0 && in ktls_disable_ifnet_help()
3385 tp->t_fb->tfb_hwtls_change != NULL) in ktls_disable_ifnet_help()
3386 (*tp->t_fb->tfb_hwtls_change)(tp, 0); in ktls_disable_ifnet_help()
3392 CURVNET_SET(so->so_vnet); in ktls_disable_ifnet_help()
3400 * Called when re-transmits are becoming a substantial portion of the
3403 * NICs keep crypto state only for in-order transmits. This means
3404 * that to handle a TCP rexmit (which is out-of-order), the NIC must
3405 * re-DMA the entire TLS record up to and including the current
3406 * segment. This means that when re-transmitting the last ~1448 byte
3407 * segment of a 16KB TLS record, we could wind up re-DMA'ing an order
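The comment above motivates disabling ifnet TLS under heavy retransmission: NIC TLS keeps crypto state only for in-order sends, so retransmitting the tail segment of a record forces the entire record back across the bus. The back-of-envelope cost for the example given:

    #include <stdio.h>

    int
    main(void)
    {
        double seg = 1448.0;        /* typical TCP payload per segment */
        double record = 16384.0;    /* full 16KB TLS record */

        /* Resending the last segment re-DMAs the whole record:
         * roughly an order of magnitude more data than the rexmit. */
        printf("bytes re-DMA'd per %.0f-byte rexmit: %.0f (%.1fx)\n",
            seg, record, record / seg);
        return (0);
    }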
3423 so = inp->inp_socket; in ktls_disable_ifnet()
3425 tls = so->so_snd.sb_tls_info; in ktls_disable_ifnet()
3426 if (tp->t_nic_ktls_xmit_dis == 1) { in ktls_disable_ifnet()
3439 tp->t_nic_ktls_xmit_dis = 1; in ktls_disable_ifnet()
3441 TASK_INIT(&tls->disable_ifnet_task, 0, ktls_disable_ifnet_help, tls); in ktls_disable_ifnet()
3442 (void)taskqueue_enqueue(taskqueue_thread, &tls->disable_ifnet_task); in ktls_disable_ifnet()
3452 xk->gen = ktls->gen; in ktls_session_to_xktls_onedir()
3453 #define A(m) xk->m = ktls->params.m in ktls_session_to_xktls_onedir()
3466 memcpy(&xk->iv, &ktls->params.iv, XKTLS_SESSION_IV_BUF_LEN); in ktls_session_to_xktls_onedir()
3469 memset(&xk->iv, 0, XKTLS_SESSION_IV_BUF_LEN); in ktls_session_to_xktls_onedir()
3470 xk->iv_len = 0; in ktls_session_to_xktls_onedir()
3473 if ((st = ktls->snd_tag) != NULL && in ktls_session_to_xktls_onedir()
3474 (ifp = ktls->snd_tag->ifp) != NULL) in ktls_session_to_xktls_onedir()
3475 strncpy(xk->ifnet, if_name(ifp), sizeof(xk->ifnet)); in ktls_session_to_xktls_onedir()
3489 tc = MIN(t, ktls->params.cipher_key_len); in ktls_session_copy_keys()
3491 memcpy(data, ktls->params.cipher_key, tc); in ktls_session_copy_keys()
3492 ta = MIN(t - tc, ktls->params.auth_key_len); in ktls_session_copy_keys()
3494 memcpy(data + tc, ktls->params.auth_key, ta); in ktls_session_copy_keys()