/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017-2018 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_kern_tls.h"

#include <sys/cdefs.h>
#ifdef KERN_TLS
#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/ktls.h>
#include <sys/sglist.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_tcb.h"
#include "crypto/t4_crypto.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

/*
 * The TCP sequence number of a CPL_TLS_DATA mbuf is saved here while
 * the mbuf is in the ulp_pdu_reclaimq.
 */
#define tls_tcp_seq	PH_loc.thirtytwo[0]

static void
t4_set_tls_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask,
    uint64_t val, int reply, int cookie)
{
	struct adapter *sc = td_adapter(toep->td);
	struct mbuf *m;

	m = alloc_raw_wr_mbuf(sizeof(struct cpl_set_tcb_field));
	if (m == NULL) {
		/* XXX */
		panic("%s: out of memory", __func__);
	}

	write_set_tcb_field(sc, mtod(m, void *), toep, word, mask, val, reply,
	    cookie);

	t4_raw_wr_tx(sc, toep, m);
}

/* TLS and DTLS common routines */
bool
can_tls_offload(struct adapter *sc)
{

	return (sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS);
}

int
tls_tx_key(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	return (tls_ofld->tx_key_addr >= 0);
}

/* Set TF_RX_QUIESCE to pause receive. */
static void
t4_set_rx_quiesce(struct toepcb *toep)
{

	t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1),
	    V_TF_RX_QUIESCE(1), 1, CPL_COOKIE_TOM);
}

/* Clear TF_RX_QUIESCE to re-enable receive. */
static void
t4_clear_rx_quiesce(struct toepcb *toep)
{

	t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0, 0, 0);
}

/* TLS/DTLS content type for CPL SFO */
static inline unsigned char
tls_content_type(unsigned char content_type)
{
	switch (content_type) {
	case CONTENT_TYPE_CCS:
		return CPL_TX_TLS_SFO_TYPE_CCS;
	case CONTENT_TYPE_ALERT:
		return CPL_TX_TLS_SFO_TYPE_ALERT;
	case CONTENT_TYPE_HANDSHAKE:
		return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
	case CONTENT_TYPE_APP_DATA:
		return CPL_TX_TLS_SFO_TYPE_DATA;
	default:
		return CPL_TX_TLS_SFO_TYPE_CUSTOM;
	}
}

/* TLS Key memory management */
static void
clear_tls_keyid(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct adapter *sc = td_adapter(toep->td);

	if (tls_ofld->rx_key_addr >= 0) {
		t4_free_tls_keyid(sc, tls_ofld->rx_key_addr);
		tls_ofld->rx_key_addr = -1;
	}
	if (tls_ofld->tx_key_addr >= 0) {
		t4_free_tls_keyid(sc, tls_ofld->tx_key_addr);
		tls_ofld->tx_key_addr = -1;
	}
}

static int
get_tp_plen_max(struct ktls_session *tls)
{
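	/*
	 * 1448 bytes is the typical TCP payload per Ethernet segment
	 * (1500-byte MTU less IP, TCP, and timestamp option headers),
	 * so this is the largest multiple of the per-segment payload
	 * that fits in the transmit page size.
	 */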
	int plen = ((min(3 * 4096, TP_TX_PG_SZ)) / 1448) * 1448;

	return (tls->params.max_frame_len <= 8192 ? plen : FC_TP_PLEN_MAX);
}

/* Send request to save the key in on-card memory. */
static int
tls_program_key_id(struct toepcb *toep, struct ktls_session *tls,
    int direction)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct adapter *sc = td_adapter(toep->td);
	int keyid;
	struct mbuf *m;
	struct tls_key_req *kwr;
	struct tls_keyctx *kctx;

#ifdef INVARIANTS
	int kwrlen, kctxlen, len;

	kwrlen = sizeof(*kwr);
	kctxlen = roundup2(sizeof(*kctx), 32);
	len = roundup2(kwrlen + kctxlen, 16);
	MPASS(TLS_KEY_WR_SZ == len);
#endif
	if (toep->txsd_avail == 0)
		return (EAGAIN);

	if ((keyid = t4_alloc_tls_keyid(sc)) < 0) {
		return (ENOSPC);
	}

	m = alloc_raw_wr_mbuf(TLS_KEY_WR_SZ);
	if (m == NULL) {
		t4_free_tls_keyid(sc, keyid);
		return (ENOMEM);
	}
	kwr = mtod(m, struct tls_key_req *);
	memset(kwr, 0, TLS_KEY_WR_SZ);

	t4_write_tlskey_wr(tls, direction, toep->tid, F_FW_WR_COMPL, keyid,
	    kwr);
	kctx = (struct tls_keyctx *)(kwr + 1);
	if (direction == KTLS_TX)
		tls_ofld->tx_key_addr = keyid;
	else
		tls_ofld->rx_key_addr = keyid;
	t4_tls_key_ctx(tls, direction, kctx);

	t4_raw_wr_tx(sc, toep, m);

	return (0);
}

int
tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
{
	struct adapter *sc = td_adapter(toep->td);
	int error, explicit_iv_size, mac_first;

	if (!can_tls_offload(sc))
		return (EINVAL);

	if (direction == KTLS_RX) {
		if (ulp_mode(toep) != ULP_MODE_NONE)
			return (EINVAL);
		if ((toep->flags & TPF_TLS_STARTING) != 0)
			return (EINVAL);
	} else {
		switch (ulp_mode(toep)) {
		case ULP_MODE_NONE:
		case ULP_MODE_TLS:
		case ULP_MODE_TCPDDP:
			break;
		default:
			return (EINVAL);
		}
	}

	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_CBC:
		/* XXX: Explicitly ignore any provided IV. */
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 192 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}
		switch (tls->params.auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
			break;
		default:
			return (EPROTONOSUPPORT);
		}
		explicit_iv_size = AES_BLOCK_LEN;
		mac_first = 1;
		break;
	case CRYPTO_AES_NIST_GCM_16:
		if (tls->params.iv_len != SALT_SIZE) {
			return (EINVAL);
		}
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 192 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}
		explicit_iv_size = 8;
		mac_first = 0;
		break;
	default:
		return (EPROTONOSUPPORT);
	}

	/* Only TLS 1.1 and TLS 1.2 are currently supported. */
	if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
	    tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
	    tls->params.tls_vminor > TLS_MINOR_VER_TWO) {
		return (EPROTONOSUPPORT);
	}

	/* Bail if we already have a key. */
	if (direction == KTLS_TX) {
		if (toep->tls.tx_key_addr != -1)
			return (EOPNOTSUPP);
	} else {
		if (toep->tls.rx_key_addr != -1)
			return (EOPNOTSUPP);
	}

	error = tls_program_key_id(toep, tls, direction);
	if (error)
		return (error);

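	/*
	 * For TX, precompute the SCMD words reused by every TLS work
	 * request.  The SCMD IV size field is expressed in 2-byte
	 * units, hence the division by two below.
	 */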
	if (direction == KTLS_TX) {
		toep->tls.scmd0.seqno_numivs =
		    (V_SCMD_SEQ_NO_CTRL(3) |
		    V_SCMD_PROTO_VERSION(t4_tls_proto_ver(tls)) |
		    V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
		    V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
		    V_SCMD_CIPH_MODE(t4_tls_cipher_mode(tls)) |
		    V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
		    V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
		    V_SCMD_IV_SIZE(explicit_iv_size / 2));

		toep->tls.scmd0.ivgen_hdrlen =
		    (V_SCMD_IV_GEN_CTRL(1) |
		    V_SCMD_KEY_CTX_INLINE(0) |
		    V_SCMD_TLS_FRAG_ENABLE(1));

		toep->tls.iv_len = explicit_iv_size;
		toep->tls.frag_size = tls->params.max_frame_len;
		toep->tls.fcplenmax = get_tp_plen_max(tls);
		toep->tls.expn_per_ulp = tls->params.tls_hlen +
		    tls->params.tls_tlen;
		toep->tls.pdus_per_ulp = 1;
		toep->tls.adjusted_plen = toep->tls.expn_per_ulp +
		    tls->params.max_frame_len;
		toep->tls.tx_key_info_size = t4_tls_key_info_size(tls);
	} else {
		toep->flags |= TPF_TLS_STARTING | TPF_TLS_RX_QUIESCING;
		toep->tls.rx_version = tls->params.tls_vmajor << 8 |
		    tls->params.tls_vminor;

		CTR2(KTR_CXGBE, "%s: tid %d setting RX_QUIESCE", __func__,
		    toep->tid);
		t4_set_rx_quiesce(toep);
	}

	return (0);
}

void
tls_init_toep(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	tls_ofld->rx_key_addr = -1;
	tls_ofld->tx_key_addr = -1;
}

void
tls_uninit_toep(struct toepcb *toep)
{

	clear_tls_keyid(toep);
}

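/*
 * Tx work request credits are in units of 16 bytes.  A TLS TX work
 * request needs credits for its fixed headers, a full explicit IV,
 * and at least one byte of payload, which is what
 * MIN_OFLD_TLSTX_CREDITS computes.
 */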
#define MAX_OFLD_TX_CREDITS	(SGE_MAX_WR_LEN / 16)
#define MIN_OFLD_TLSTX_CREDITS(toep)					\
	(howmany(sizeof(struct fw_tlstx_data_wr) +			\
	    sizeof(struct cpl_tx_tls_sfo) + sizeof(struct ulptx_idata) + \
	    sizeof(struct ulptx_sc_memrd) +				\
	    AES_BLOCK_LEN + 1, 16))

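/*
 * Fill out the FW_TLSTX_DATA_WR work request header.  'plen' is the
 * TLS payload length and 'expn' is the expansion (TLS header and
 * trailer bytes) that accompanies it, so the total wire length is
 * the sum of the two.
 */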
static void
write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
    unsigned int plen, unsigned int expn, uint8_t credits, int shove)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	unsigned int len = plen + expn;

	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_TLSTX_DATA_WR) |
	    V_FW_TLSTX_DATA_WR_COMPL(1) |
	    V_FW_TLSTX_DATA_WR_IMMDLEN(0));
	txwr->flowid_len16 = htobe32(V_FW_TLSTX_DATA_WR_FLOWID(toep->tid) |
	    V_FW_TLSTX_DATA_WR_LEN16(credits));
	txwr->plen = htobe32(len);
	txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) |
	    V_TX_URG(0) | /* F_T6_TX_FORCE | */ V_TX_SHOVE(shove));
	txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(1) |
	    V_FW_TLSTX_DATA_WR_EXP(expn) |
	    V_FW_TLSTX_DATA_WR_CTXLOC(TLS_SFO_WR_CONTEXTLOC_DDR) |
	    V_FW_TLSTX_DATA_WR_IVDSGL(0) |
	    V_FW_TLSTX_DATA_WR_KEYSIZE(tls_ofld->tx_key_info_size >> 4));
	txwr->mfs = htobe16(tls_ofld->frag_size);
	txwr->adjustedplen_pkd = htobe16(
	    V_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(tls_ofld->adjusted_plen));
	txwr->expinplenmax_pkd = htobe16(
	    V_FW_TLSTX_DATA_WR_EXPINPLENMAX(tls_ofld->expn_per_ulp));
	txwr->pdusinplenmax_pkd =
	    V_FW_TLSTX_DATA_WR_PDUSINPLENMAX(tls_ofld->pdus_per_ulp);
}

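/*
 * Fill out the CPL_TX_TLS_SFO command describing the TLS record to
 * the crypto engine: content type, segment length, the precomputed
 * SCMD words, and the 64-bit record sequence number.
 */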
static void
write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,
    struct tls_hdr *tls_hdr, unsigned int plen, uint64_t seqno)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	int data_type, seglen;

	seglen = plen;
	data_type = tls_content_type(tls_hdr->type);
	cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |
	    V_CPL_TX_TLS_SFO_DATA_TYPE(data_type) |
	    V_CPL_TX_TLS_SFO_CPL_LEN(2) | V_CPL_TX_TLS_SFO_SEG_LEN(seglen));
	cpl->pld_len = htobe32(plen);
	if (data_type == CPL_TX_TLS_SFO_TYPE_CUSTOM)
		cpl->type_protover = htobe32(
		    V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type));
	cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |
	    V_SCMD_NUM_IVS(1));
	cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);
	cpl->scmd1 = htobe64(seqno);
}

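/*
 * Count the number of scatter/gather list entries needed for an
 * unmapped (M_EXTPG) mbuf.  Physically contiguous pages coalesce
 * into a single entry.
 */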
static int
count_ext_pgs_segs(struct mbuf *m)
{
	vm_paddr_t nextpa;
	u_int i, nsegs;

	MPASS(m->m_epg_npgs > 0);
	nsegs = 1;
	nextpa = m->m_epg_pa[0] + PAGE_SIZE;
	for (i = 1; i < m->m_epg_npgs; i++) {
		if (nextpa != m->m_epg_pa[i])
			nsegs++;
		nextpa = m->m_epg_pa[i] + PAGE_SIZE;
	}
	return (nsegs);
}

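/*
 * Write a ULP_TX_SC_DSGL scatter/gather list describing an unmapped
 * mbuf's pages.  The first address/length pair is stored in the
 * ulptx_sgl header itself; the remaining pairs are packed two per
 * ulptx_sge_pair, coalescing contiguous pages the same way
 * count_ext_pgs_segs() does.
 */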
static void
write_ktlstx_sgl(void *dst, struct mbuf *m, int nsegs)
{
	struct ulptx_sgl *usgl = dst;
	vm_paddr_t pa;
	uint32_t len;
	int i, j;

	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));

	/* Figure out the first S/G length. */
	pa = m->m_epg_pa[0] + m->m_epg_1st_off;
	usgl->addr0 = htobe64(pa);
	len = m_epg_pagelen(m, 0, m->m_epg_1st_off);
	pa += len;
	for (i = 1; i < m->m_epg_npgs; i++) {
		if (m->m_epg_pa[i] != pa)
			break;
		len += m_epg_pagelen(m, i, 0);
		pa += m_epg_pagelen(m, i, 0);
	}
	usgl->len0 = htobe32(len);
#ifdef INVARIANTS
	nsegs--;
#endif

	j = -1;
	for (; i < m->m_epg_npgs; i++) {
		if (j == -1 || m->m_epg_pa[i] != pa) {
			if (j >= 0)
				usgl->sge[j / 2].len[j & 1] = htobe32(len);
			j++;
#ifdef INVARIANTS
			nsegs--;
#endif
			pa = m->m_epg_pa[i];
			usgl->sge[j / 2].addr[j & 1] = htobe64(pa);
			len = m_epg_pagelen(m, i, 0);
			pa += len;
		} else {
			len += m_epg_pagelen(m, i, 0);
			pa += m_epg_pagelen(m, i, 0);
		}
	}
	if (j >= 0) {
		usgl->sge[j / 2].len[j & 1] = htobe32(len);

		if ((j & 1) == 0)
			usgl->sge[j / 2].len[1] = htobe32(0);
	}
	KASSERT(nsegs == 0, ("%s: nsegs %d, m %p", __func__, nsegs, m));
}

/*
 * Similar to t4_push_frames() but handles sockets that contain TLS
 * record mbufs.
 */
void
t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct tls_hdr *thdr;
	struct fw_tlstx_data_wr *txwr;
	struct cpl_tx_tls_sfo *cpl;
	struct ulptx_idata *idata;
	struct ulptx_sc_memrd *memrd;
	struct wrqe *wr;
	struct mbuf *m;
	u_int nsegs, credits, wr_len;
	u_int expn_size;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_snd;
	struct mbufq *pduq = &toep->ulp_pduq;
	int tls_size, tx_credits, shove, sowwakeup;
	struct ofld_tx_sdesc *txsd;
	char *buf;

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	KASSERT(ulp_mode(toep) == ULP_MODE_NONE ||
	    ulp_mode(toep) == ULP_MODE_TCPDDP || ulp_mode(toep) == ULP_MODE_TLS,
	    ("%s: ulp_mode %u for toep %p", __func__, ulp_mode(toep), toep));
	KASSERT(tls_tx_key(toep),
	    ("%s: TX key not set for toep %p", __func__, toep));

#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
	    __func__, toep->tid, toep->flags, tp->t_flags, drop);
#endif
	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

#ifdef RATELIMIT
	if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) &&
	    (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) {
		inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED;
	}
#endif

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	txsd = &toep->txsd[toep->txsd_pidx];
	for (;;) {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);

		if (__predict_false((m = mbufq_first(pduq)) != NULL)) {
			if (!t4_push_raw_wr(sc, toep, m)) {
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}

			(void)mbufq_dequeue(pduq);

			txsd = &toep->txsd[toep->txsd_pidx];
			continue;
		}

		SOCKBUF_LOCK(sb);
		sowwakeup = drop;
		if (drop) {
			sbdrop_locked(sb, drop);
			drop = 0;
		}

		m = sb->sb_sndptr != NULL ? sb->sb_sndptr->m_next : sb->sb_mb;

		/*
		 * Send a FIN if requested, but only if there's no
		 * more data to send.
		 */
		if (m == NULL && toep->flags & TPF_SEND_FIN) {
			if (sowwakeup)
				sowwakeup_locked(so);
			else
				SOCKBUF_UNLOCK(sb);
			SOCKBUF_UNLOCK_ASSERT(sb);
			t4_close_conn(sc, toep);
			return;
		}

		/*
		 * If there is no ready data to send, wait until more
		 * data arrives.
		 */
		if (m == NULL || (m->m_flags & M_NOTREADY) != 0) {
			if (sowwakeup)
				sowwakeup_locked(so);
			else
				SOCKBUF_UNLOCK(sb);
			SOCKBUF_UNLOCK_ASSERT(sb);
#ifdef VERBOSE_TRACES
			CTR2(KTR_CXGBE, "%s: tid %d no ready data to send",
			    __func__, toep->tid);
#endif
			return;
		}

		KASSERT(m->m_flags & M_EXTPG, ("%s: mbuf %p is not NOMAP",
		    __func__, m));
		KASSERT(m->m_epg_tls != NULL,
		    ("%s: mbuf %p doesn't have TLS session", __func__, m));

		/* Calculate WR length. */
		wr_len = sizeof(struct fw_tlstx_data_wr) +
		    sizeof(struct cpl_tx_tls_sfo) +
		    sizeof(struct ulptx_idata) + sizeof(struct ulptx_sc_memrd);

		/* Explicit IVs for AES-CBC and AES-GCM are <= 16. */
		MPASS(toep->tls.iv_len <= AES_BLOCK_LEN);
		wr_len += AES_BLOCK_LEN;

		/* Account for SGL in work request length. */
		nsegs = count_ext_pgs_segs(m);
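		/*
		 * The first segment fits in the ulptx_sgl header;
		 * each additional pair of segments occupies three
		 * 8-byte words, and a final unpaired segment two.
		 */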
		wr_len += sizeof(struct ulptx_sgl) +
		    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;

		/* Not enough credits for this work request. */
		if (howmany(wr_len, 16) > tx_credits) {
			if (sowwakeup)
				sowwakeup_locked(so);
			else
				SOCKBUF_UNLOCK(sb);
			SOCKBUF_UNLOCK_ASSERT(sb);
#ifdef VERBOSE_TRACES
			CTR5(KTR_CXGBE,
			    "%s: tid %d mbuf %p requires %d credits, but only %d available",
			    __func__, toep->tid, m, howmany(wr_len, 16),
			    tx_credits);
#endif
			toep->flags |= TPF_TX_SUSPENDED;
			return;
		}

		/* Shove if there is no additional data pending. */
		shove = ((m->m_next == NULL ||
		    (m->m_next->m_flags & M_NOTREADY) != 0)) &&
		    (tp->t_flags & TF_MORETOCOME) == 0;

		if (sb->sb_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf &&
		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
		    sbused(sb) >= sb->sb_hiwat * 7 / 8) {
			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max);

			if (!sbreserve_locked(so, SO_SND, newsize, NULL))
				sb->sb_flags &= ~SB_AUTOSIZE;
			else
				sowwakeup = 1;	/* room available */
		}
		if (sowwakeup)
			sowwakeup_locked(so);
		else
			SOCKBUF_UNLOCK(sb);
		SOCKBUF_UNLOCK_ASSERT(sb);

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		wr = alloc_wrqe(roundup2(wr_len, 16), &toep->ofld_txq->wrq);
		if (wr == NULL) {
			/* XXX: how will we recover from this? */
			toep->flags |= TPF_TX_SUSPENDED;
			return;
		}

		thdr = (struct tls_hdr *)&m->m_epg_hdr;
#ifdef VERBOSE_TRACES
		CTR5(KTR_CXGBE, "%s: tid %d TLS record %ju type %d len %#x",
		    __func__, toep->tid, m->m_epg_seqno, thdr->type,
		    m->m_len);
#endif
		txwr = wrtod(wr);
		cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);
		memset(txwr, 0, roundup2(wr_len, 16));
		credits = howmany(wr_len, 16);
		expn_size = m->m_epg_hdrlen +
		    m->m_epg_trllen;
		tls_size = m->m_len - expn_size;
		write_tlstx_wr(txwr, toep, tls_size, expn_size, credits, shove);
		write_tlstx_cpl(cpl, toep, thdr, tls_size, m->m_epg_seqno);

		idata = (struct ulptx_idata *)(cpl + 1);
		idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		idata->len = htobe32(0);
		memrd = (struct ulptx_sc_memrd *)(idata + 1);
		memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
		    V_ULP_TX_SC_MORE(1) |
		    V_ULPTX_LEN16(toep->tls.tx_key_info_size >> 4));
		memrd->addr = htobe32(toep->tls.tx_key_addr >> 5);

		/* Copy IV. */
		buf = (char *)(memrd + 1);
		memcpy(buf, thdr + 1, toep->tls.iv_len);
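		/*
		 * A full AES block is always reserved for the IV in
		 * the WR, so advance past AES_BLOCK_LEN even when
		 * iv_len is shorter.
		 */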
		buf += AES_BLOCK_LEN;

		write_ktlstx_sgl(buf, m, nsegs);

		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));

		toep->tx_credits -= credits;

		tp->snd_nxt += m->m_len;
		tp->snd_max += m->m_len;

		SOCKBUF_LOCK(sb);
		sb->sb_sndptr = m;
		SOCKBUF_UNLOCK(sb);

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TLSTX_CREDITS(toep))
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		KASSERT(m->m_len <= MAX_OFLD_TX_SDESC_PLEN,
		    ("%s: plen %u too large", __func__, m->m_len));
		txsd->plen = m->m_len;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		counter_u64_add(toep->ofld_txq->tx_toe_tls_records, 1);
		counter_u64_add(toep->ofld_txq->tx_toe_tls_octets, m->m_len);

		t4_l2t_send(sc, wr, toep->l2te);
	}
}

/*
 * For TLS data we place mbufs received via CPL_TLS_DATA into an
 * mbufq in the TLS offload state.  When CPL_RX_TLS_CMP is
 * received, the completed PDUs are placed into the socket receive
 * buffer.
 *
 * The TLS code reuses the ulp_pdu_reclaimq to hold the pending mbufs.
 */
static int
do_tls_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_tls_data *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	int len;

	/* XXX: Should this match do_rx_data instead? */
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
	len = m->m_pkthdr.len;

	toep->ofld_rxq->rx_toe_tls_octets += len;

	KASSERT(len == G_CPL_TLS_DATA_LENGTH(be32toh(cpl->length_pkd)),
	    ("%s: payload length mismatch", __func__));

	INP_WLOCK(inp);
	if (inp->inp_flags & INP_DROPPED) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	/* Save TCP sequence number. */
	m->m_pkthdr.tls_tcp_seq = be32toh(cpl->seq);

	if (mbufq_enqueue(&toep->ulp_pdu_reclaimq, m)) {
#ifdef INVARIANTS
		panic("Failed to queue TLS data packet");
#else
		printf("%s: Failed to queue TLS data packet\n", __func__);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
#endif
	}

	tp = intotcpcb(inp);
	tp->t_rcvtime = ticks;

#ifdef VERBOSE_TRACES
	CTR4(KTR_CXGBE, "%s: tid %u len %d seq %u", __func__, tid, len,
	    be32toh(cpl->seq));
#endif

	INP_WUNLOCK(inp);
	return (0);
}

static int
do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_tls_cmp *cpl = mtod(m, const void *);
	struct tlsrx_hdr_pkt *tls_hdr_pkt;
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	struct mbuf *tls_data;
	struct tls_get_record *tgr;
	struct mbuf *control;
	int pdu_length, trailer_len;
#if defined(KTR) || defined(INVARIANTS)
	int len;
#endif

	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
#if defined(KTR) || defined(INVARIANTS)
	len = m->m_pkthdr.len;
#endif

	toep->ofld_rxq->rx_toe_tls_records++;

	KASSERT(len == G_CPL_RX_TLS_CMP_LENGTH(be32toh(cpl->pdulength_length)),
	    ("%s: payload length mismatch", __func__));

	INP_WLOCK(inp);
	if (inp->inp_flags & INP_DROPPED) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	pdu_length = G_CPL_RX_TLS_CMP_PDULENGTH(be32toh(cpl->pdulength_length));

	so = inp_inpcbtosocket(inp);
	tp = intotcpcb(inp);

#ifdef VERBOSE_TRACES
	CTR6(KTR_CXGBE, "%s: tid %u PDU len %d len %d seq %u, rcv_nxt %u",
	    __func__, tid, pdu_length, len, be32toh(cpl->seq), tp->rcv_nxt);
#endif

	tp->rcv_nxt += pdu_length;
	KASSERT(tp->rcv_wnd >= pdu_length,
	    ("%s: negative window size", __func__));
	tp->rcv_wnd -= pdu_length;

	/* XXX: Not sure what to do about urgent data. */

	/*
	 * The payload of this CPL is the TLS header followed by
	 * additional fields.
	 */
	KASSERT(m->m_len >= sizeof(*tls_hdr_pkt),
	    ("%s: payload too small", __func__));
	tls_hdr_pkt = mtod(m, void *);

	tls_data = mbufq_dequeue(&toep->ulp_pdu_reclaimq);
	if (tls_data != NULL) {
		KASSERT(be32toh(cpl->seq) == tls_data->m_pkthdr.tls_tcp_seq,
		    ("%s: sequence mismatch", __func__));
	}

	/* Report decryption errors as EBADMSG. */
	if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) != 0) {
		CTR4(KTR_CXGBE, "%s: tid %u TLS error %#x ddp_vld %#x",
		    __func__, toep->tid, tls_hdr_pkt->res_to_mac_error,
		    be32toh(cpl->ddp_valid));
		m_freem(m);
		m_freem(tls_data);

		CURVNET_SET(toep->vnet);
		so->so_error = EBADMSG;
		sorwakeup(so);

		INP_WUNLOCK(inp);
		CURVNET_RESTORE();

		return (0);
	}

	/* Handle data received after the socket is closed. */
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
		struct epoch_tracker et;

		CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
		    __func__, tid, pdu_length);
		m_freem(m);
		m_freem(tls_data);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);

		CURVNET_SET(toep->vnet);
		NET_EPOCH_ENTER(et);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp != NULL)
			INP_WUNLOCK(inp);
		NET_EPOCH_EXIT(et);
		CURVNET_RESTORE();

		return (0);
	}

	/*
	 * If there is any data in the 'sb_mtls' chain of the socket
	 * or we aren't able to allocate the control mbuf, append the
	 * record as a CSUM_TLS_DECRYPTED packet to 'sb_mtls' rather
	 * than as a decrypted record to 'sb_mb'.
	 */
	if (sb->sb_mtls != NULL)
		control = NULL;
	else
		control = sbcreatecontrol(NULL, sizeof(*tgr), TLS_GET_RECORD,
		    IPPROTO_TCP, M_NOWAIT);

	if (control != NULL) {
		tgr = (struct tls_get_record *)
		    CMSG_DATA(mtod(control, struct cmsghdr *));
		memset(tgr, 0, sizeof(*tgr));
		tgr->tls_type = tls_hdr_pkt->type;
		tgr->tls_vmajor = be16toh(tls_hdr_pkt->version) >> 8;
		tgr->tls_vminor = be16toh(tls_hdr_pkt->version) & 0xff;
		if (tls_data != NULL) {
			m_last(tls_data)->m_flags |= M_EOR;
			tgr->tls_length = htobe16(tls_data->m_pkthdr.len);
		} else
			tgr->tls_length = 0;

		m_freem(m);
		m = tls_data;
	} else {
		M_ASSERTPKTHDR(m);

		/* It's ok that any explicit IV is missing. */
		m->m_len = sb->sb_tls_info->params.tls_hlen;
		m->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
		m->m_pkthdr.len = m->m_len;
		if (tls_data != NULL) {
			m->m_pkthdr.len += tls_data->m_pkthdr.len;
			m_demote_pkthdr(tls_data);
			m->m_next = tls_data;
		}

		/*
		 * Grow the chain by the trailer, but without
		 * contents.  The trailer will be thrown away by
		 * ktls_decrypt.  Note that ktls_decrypt assumes the
		 * trailer is tls_tlen bytes long, so append that many
		 * bytes not the actual trailer size computed from
		 * pdu_length.
		 */
		trailer_len = sb->sb_tls_info->params.tls_tlen;
		if (tls_data != NULL) {
			m_last(tls_data)->m_len += trailer_len;
			tls_data = NULL;
		} else
			m->m_len += trailer_len;
		m->m_pkthdr.len += trailer_len;
		tls_hdr_pkt->length = htobe16(m->m_pkthdr.len -
		    sizeof(struct tls_record_layer));
	}

	/* receive buffer autosize */
	MPASS(toep->vnet == so->so_vnet);
	CURVNET_SET(toep->vnet);
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    m->m_pkthdr.len > (sbspace(sb) / 8 * 7)) {
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
	}

	if (control != NULL)
		sbappendcontrol_locked(sb, m, control, 0);
	else
		sbappendstream_locked(sb, m, 0);
	t4_rcvd_locked(&toep->td->tod, tp);

	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);

	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return (0);
}

void
do_rx_data_tls(const struct cpl_rx_data *cpl, struct toepcb *toep,
    struct mbuf *m)
{
	struct inpcb *inp = toep->inp;
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct tls_hdr *hdr;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	int len;

	len = m->m_pkthdr.len;

	INP_WLOCK_ASSERT(inp);

	so = inp_inpcbtosocket(inp);
	tp = intotcpcb(inp);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	CURVNET_SET(toep->vnet);

	tp->rcv_nxt += len;
	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
	tp->rcv_wnd -= len;

	/* Do we have a full TLS header? */
	if (len < sizeof(*hdr)) {
		CTR3(KTR_CXGBE, "%s: tid %u len %d: too short for a TLS header",
		    __func__, toep->tid, len);
		so->so_error = EMSGSIZE;
		goto out;
	}
	hdr = mtod(m, struct tls_hdr *);

	/* Is the header valid? */
	if (be16toh(hdr->version) != tls_ofld->rx_version) {
		CTR3(KTR_CXGBE, "%s: tid %u invalid version %04x",
		    __func__, toep->tid, be16toh(hdr->version));
		so->so_error = EINVAL;
		goto out;
	}
	if (be16toh(hdr->length) < sizeof(*hdr)) {
		CTR3(KTR_CXGBE, "%s: tid %u invalid length %u",
		    __func__, toep->tid, be16toh(hdr->length));
		so->so_error = EBADMSG;
		goto out;
	}

	/* Did we get a truncated record? */
	if (len < be16toh(hdr->length)) {
		CTR4(KTR_CXGBE, "%s: tid %u truncated TLS record (%d vs %u)",
		    __func__, toep->tid, len, be16toh(hdr->length));

		so->so_error = EMSGSIZE;
		goto out;
	}

	/* Is the header type unknown? */
	switch (hdr->type) {
	case CONTENT_TYPE_CCS:
	case CONTENT_TYPE_ALERT:
	case CONTENT_TYPE_APP_DATA:
	case CONTENT_TYPE_HANDSHAKE:
		break;
	default:
		CTR3(KTR_CXGBE, "%s: tid %u invalid TLS record type %u",
		    __func__, toep->tid, hdr->type);
		so->so_error = EBADMSG;
		goto out;
	}

	/*
	 * Just punt.  Although this could fall back to software
	 * decryption, this case should never really happen.
	 */
	CTR4(KTR_CXGBE, "%s: tid %u dropping TLS record type %u, length %u",
	    __func__, toep->tid, hdr->type, be16toh(hdr->length));
	so->so_error = EBADMSG;

out:
	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);

	INP_WUNLOCK(inp);
	CURVNET_RESTORE();

	m_freem(m);
}

/*
 * Send a work request setting multiple TCB fields to enable
 * ULP_MODE_TLS.
 */
static void
tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno)
{
	struct mbuf *m;
	struct work_request_hdr *wrh;
	struct ulp_txpkt *ulpmc;
	int fields, key_offset, len;

	KASSERT(ulp_mode(toep) == ULP_MODE_NONE,
	    ("%s: tid %d already ULP_MODE_TLS", __func__, toep->tid));

	fields = 0;

	/* 2 writes for the overlay region */
	fields += 2;

	/* W_TCB_TLS_SEQ */
	fields++;

	/* W_TCB_ULP_RAW */
	fields++;

	/* W_TCB_ULP_TYPE */
	fields++;

	/* W_TCB_T_FLAGS */
	fields++;

	len = sizeof(*wrh) + fields * roundup2(LEN__SET_TCB_FIELD_ULP, 16);
	KASSERT(len <= SGE_MAX_WR_LEN,
	    ("%s: WR with %d TCB field updates too large", __func__, fields));

	m = alloc_raw_wr_mbuf(len);
	if (m == NULL) {
		/* XXX */
		panic("%s: out of memory", __func__);
	}

	wrh = mtod(m, struct work_request_hdr *);
	INIT_ULPTX_WRH(wrh, len, 1, toep->tid);	/* atomic */
	ulpmc = (struct ulp_txpkt *)(wrh + 1);

	/*
	 * Clear the TLS overlay region: 1023:832.
	 *
	 * Words 26/27 are always set to zero.  Words 28/29
	 * contain seqno and are set when enabling TLS
	 * decryption.  Word 30 is zero and Word 31 contains
	 * the keyid.
	 */
	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 26,
	    0xffffffffffffffff, 0);

	/*
	 * RX key tags are an index into the key portion of MA
	 * memory stored as an offset from the base address in
	 * units of 64 bytes.
	 */
	key_offset = toep->tls.rx_key_addr - sc->vres.key.start;
	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 30,
	    0xffffffffffffffff,
	    (uint64_t)V_TCB_RX_TLS_KEY_TAG(key_offset / 64) << 32);

	CTR3(KTR_CXGBE, "%s: tid %d enable TLS seqno %lu", __func__,
	    toep->tid, seqno);
	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_TLS_SEQ,
	    V_TCB_TLS_SEQ(M_TCB_TLS_SEQ), V_TCB_TLS_SEQ(seqno));
	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_ULP_RAW,
	    V_TCB_ULP_RAW(M_TCB_ULP_RAW),
	    V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) | V_TF_TLS_CONTROL(1) |
	    V_TF_TLS_ACTIVE(1) | V_TF_TLS_ENABLE(1))));

	toep->flags &= ~TPF_TLS_STARTING;
	toep->flags |= TPF_TLS_RECEIVE;

	/* Set the ULP mode to ULP_MODE_TLS. */
	toep->params.ulp_mode = ULP_MODE_TLS;
	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_ULP_TYPE,
	    V_TCB_ULP_TYPE(M_TCB_ULP_TYPE), V_TCB_ULP_TYPE(ULP_MODE_TLS));

	/* Clear TF_RX_QUIESCE. */
	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_T_FLAGS,
	    V_TF_RX_QUIESCE(1), 0);

	t4_raw_wr_tx(sc, toep, m);
}

/*
 * Examine the pending data in the socket buffer and either enable TLS
 * RX or request more encrypted data.
 */
static void
tls_check_rx_sockbuf(struct adapter *sc, struct toepcb *toep,
    struct sockbuf *sb)
{
	uint64_t seqno;
	size_t resid;
	bool have_header;

	SOCKBUF_LOCK_ASSERT(sb);
	MPASS(toep->tls.rx_resid == 0);

	have_header = ktls_pending_rx_info(sb, &seqno, &resid);
	CTR5(KTR_CXGBE, "%s: tid %d have_header %d seqno %lu resid %zu",
	    __func__, toep->tid, have_header, seqno, resid);

	/*
	 * If we have a partial header, or a complete header but
	 * still need more of the record's payload, re-enable receive
	 * and pause again once more data arrives to try again.
	 */
	if (!have_header || resid != 0) {
		CTR(KTR_CXGBE, "%s: tid %d waiting for more data", __func__,
		    toep->tid);
		toep->flags &= ~TPF_TLS_RX_QUIESCED;
		t4_clear_rx_quiesce(toep);
		return;
	}

	tls_update_tcb(sc, toep, seqno);
}

void
tls_received_starting_data(struct adapter *sc, struct toepcb *toep,
    struct sockbuf *sb, int len)
{
	MPASS(toep->flags & TPF_TLS_STARTING);

	/* Data was received before quiescing took effect. */
	if ((toep->flags & TPF_TLS_RX_QUIESCING) != 0)
		return;

	/*
	 * A previous call to tls_check_rx_sockbuf needed more data.
	 * Now that more data has arrived, quiesce receive again and
	 * check the state once the quiesce has completed.
	 */
	if ((toep->flags & TPF_TLS_RX_QUIESCED) == 0) {
		CTR(KTR_CXGBE, "%s: tid %d quiescing", __func__, toep->tid);
		toep->flags |= TPF_TLS_RX_QUIESCING;
		t4_set_rx_quiesce(toep);
		return;
	}

	KASSERT(len <= toep->tls.rx_resid,
	    ("%s: received excess bytes %d (waiting for %zu)", __func__, len,
	    toep->tls.rx_resid));
	toep->tls.rx_resid -= len;
	if (toep->tls.rx_resid != 0)
		return;

	tls_check_rx_sockbuf(sc, toep, sb);
}

static int
do_tls_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep;
	struct inpcb *inp;
	struct socket *so;
	struct sockbuf *sb;

	if (cpl->status != CPL_ERR_NONE)
		panic("XXX: tcp_rpl failed: %d", cpl->status);

	toep = lookup_tid(sc, tid);
	inp = toep->inp;
	switch (cpl->cookie) {
	case V_WORD(W_TCB_T_FLAGS) | V_COOKIE(CPL_COOKIE_TOM):
		INP_WLOCK(inp);
		if ((toep->flags & TPF_TLS_STARTING) == 0)
			panic("%s: connection is not starting TLS RX\n",
			    __func__);
		MPASS((toep->flags & TPF_TLS_RX_QUIESCING) != 0);

		toep->flags &= ~TPF_TLS_RX_QUIESCING;
		toep->flags |= TPF_TLS_RX_QUIESCED;

		so = inp->inp_socket;
		sb = &so->so_rcv;
		SOCKBUF_LOCK(sb);
		tls_check_rx_sockbuf(sc, toep, sb);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);
		break;
	default:
		panic("XXX: unknown tcb_rpl offset %#x, cookie %#x",
		    G_WORD(cpl->cookie), G_COOKIE(cpl->cookie));
	}

	return (0);
}

void
t4_tls_mod_load(void)
{

	t4_register_cpl_handler(CPL_TLS_DATA, do_tls_data);
	t4_register_cpl_handler(CPL_RX_TLS_CMP, do_rx_tls_cmp);
	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_tls_tcb_rpl,
	    CPL_COOKIE_TOM);
}

void
t4_tls_mod_unload(void)
{

	t4_register_cpl_handler(CPL_TLS_DATA, NULL);
	t4_register_cpl_handler(CPL_RX_TLS_CMP, NULL);
	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_TOM);
}
#endif /* TCP_OFFLOAD */
#endif /* KERN_TLS */