xref: /freebsd/sys/dev/cxgbe/tom/t4_tom.c (revision d9cc3d558d00ee7f62dbef2032f099033c91d2a1)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2012 Chelsio Communications, Inc.
5  * All rights reserved.
6  * Written by: Navdeep Parhar <np@FreeBSD.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 #include "opt_kern_tls.h"
34 #include "opt_ratelimit.h"
35 
36 #include <sys/param.h>
37 #include <sys/types.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/ktr.h>
41 #include <sys/lock.h>
42 #include <sys/limits.h>
43 #include <sys/module.h>
44 #include <sys/protosw.h>
45 #include <sys/domain.h>
46 #include <sys/refcount.h>
47 #include <sys/rmlock.h>
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/sysctl.h>
51 #include <sys/taskqueue.h>
52 #include <net/if.h>
53 #include <net/if_var.h>
54 #include <net/if_types.h>
55 #include <net/if_vlan_var.h>
56 #include <netinet/in.h>
57 #include <netinet/in_pcb.h>
58 #include <netinet/in_var.h>
59 #include <netinet/ip.h>
60 #include <netinet/ip6.h>
61 #include <netinet6/scope6_var.h>
62 #define TCPSTATES
63 #include <netinet/tcp_fsm.h>
64 #include <netinet/tcp_seq.h>
65 #include <netinet/tcp_timer.h>
66 #include <netinet/tcp_var.h>
67 #include <netinet/toecore.h>
68 #include <netinet/cc/cc.h>
69 
70 #ifdef TCP_OFFLOAD
71 #include "common/common.h"
72 #include "common/t4_msg.h"
73 #include "common/t4_regs.h"
74 #include "common/t4_regs_values.h"
75 #include "common/t4_tcb.h"
76 #include "t4_clip.h"
77 #include "tom/t4_tom_l2t.h"
78 #include "tom/t4_tom.h"
79 #include "tom/t4_tls.h"
80 
81 static struct protosw toe_protosw;
82 static struct protosw toe6_protosw;
83 
84 /* Module ops */
85 static int t4_tom_mod_load(void);
86 static int t4_tom_mod_unload(void);
87 static int t4_tom_modevent(module_t, int, void *);
88 
89 /* ULD ops and helpers */
90 static int t4_tom_activate(struct adapter *);
91 static int t4_tom_deactivate(struct adapter *);
92 static int t4_tom_stop(struct adapter *);
93 static int t4_tom_restart(struct adapter *);
94 
95 static struct uld_info tom_uld_info = {
96 	.uld_activate = t4_tom_activate,
97 	.uld_deactivate = t4_tom_deactivate,
98 	.uld_stop = t4_tom_stop,
99 	.uld_restart = t4_tom_restart,
100 };
101 
102 static void release_offload_resources(struct toepcb *);
103 static void done_with_toepcb(struct toepcb *);
104 static int alloc_tid_tabs(struct adapter *);
105 static void free_tid_tabs(struct adapter *);
106 static void free_tom_data(struct adapter *, struct tom_data *);
107 static void reclaim_wr_resources(void *, int);
108 static void cleanup_stranded_tids(void *, int);
109 
110 struct toepcb *
111 alloc_toepcb(struct vi_info *vi, int flags)
112 {
113 	struct port_info *pi = vi->pi;
114 	struct adapter *sc = pi->adapter;
115 	struct toepcb *toep;
116 	int tx_credits, txsd_total, len;
117 
118 	/*
119 	 * The firmware counts tx work request credits in units of 16 bytes
120 	 * each.  Reserve room for an ABORT_REQ so the driver never has to worry
121 	 * about tx credits if it wants to abort a connection.
122 	 */
123 	tx_credits = sc->params.ofldq_wr_cred;
124 	tx_credits -= howmany(sizeof(struct cpl_abort_req), 16);
125 
126 	/*
127 	 * Shortest possible tx work request is a fw_ofld_tx_data_wr + 1 byte
128 	 * immediate payload, and firmware counts tx work request credits in
129  * units of 16 bytes.  Calculate the maximum number of work requests possible.
130 	 */
131 	txsd_total = tx_credits /
132 	    howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16);
133 
134 	len = offsetof(struct toepcb, txsd) +
135 	    txsd_total * sizeof(struct ofld_tx_sdesc);
136 
137 	toep = malloc(len, M_CXGBE, M_ZERO | flags);
138 	if (toep == NULL)
139 		return (NULL);
140 
141 	refcount_init(&toep->refcount, 1);
142 	toep->td = sc->tom_softc;
143 	toep->incarnation = sc->incarnation;
144 	toep->vi = vi;
145 	toep->tid = -1;
146 	toep->tx_total = tx_credits;
147 	toep->tx_credits = tx_credits;
148 	mbufq_init(&toep->ulp_pduq, INT_MAX);
149 	mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX);
150 	toep->txsd_total = txsd_total;
151 	toep->txsd_avail = txsd_total;
152 	toep->txsd_pidx = 0;
153 	toep->txsd_cidx = 0;
154 	aiotx_init_toep(toep);
155 
156 	return (toep);
157 }
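
/*
 * Example of the sizing math above, with hypothetical firmware values.
 * If ofldq_wr_cred were 512 and sizeof(struct cpl_abort_req) were 24:
 *
 *	tx_credits = 512 - howmany(24, 16) = 512 - 2 = 510
 *
 * and if sizeof(struct fw_ofld_tx_data_wr) were 24, the smallest tx work
 * request (header + 1 byte of immediate payload) would cost
 * howmany(25, 16) = 2 credits, giving txsd_total = 510 / 2 = 255.  That
 * is the most tx work requests that can ever be outstanding at once, so
 * it sizes the txsd[] array allocated past the end of the toepcb.
 */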
158 
159 /*
160  * Initialize a toepcb after its params have been filled out.
161  */
162 int
163 init_toepcb(struct vi_info *vi, struct toepcb *toep)
164 {
165 	struct conn_params *cp = &toep->params;
166 	struct port_info *pi = vi->pi;
167 	struct adapter *sc = pi->adapter;
168 	struct tx_cl_rl_params *tc;
169 
170 	if (cp->tc_idx >= 0 && cp->tc_idx < sc->params.nsched_cls) {
171 		tc = &pi->sched_params->cl_rl[cp->tc_idx];
172 		mtx_lock(&sc->tc_lock);
173 		if (tc->state != CS_HW_CONFIGURED) {
174 			CH_ERR(vi, "tid %d cannot be bound to traffic class %d "
175 			    "because it is not configured (its state is %d)\n",
176 			    toep->tid, cp->tc_idx, tc->state);
177 			cp->tc_idx = -1;
178 		} else {
179 			tc->refcount++;
180 		}
181 		mtx_unlock(&sc->tc_lock);
182 	}
183 	toep->ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
184 	toep->ofld_rxq = &sc->sge.ofld_rxq[cp->rxq_idx];
185 	toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
186 
187 	tls_init_toep(toep);
188 	MPASS(ulp_mode(toep) != ULP_MODE_TCPDDP);
189 
190 	toep->flags |= TPF_INITIALIZED;
191 
192 	return (0);
193 }
194 
195 struct toepcb *
196 hold_toepcb(struct toepcb *toep)
197 {
198 
199 	refcount_acquire(&toep->refcount);
200 	return (toep);
201 }
202 
203 void
204 free_toepcb(struct toepcb *toep)
205 {
206 
207 	if (refcount_release(&toep->refcount) == 0)
208 		return;
209 
210 	KASSERT(!(toep->flags & TPF_ATTACHED),
211 	    ("%s: attached to an inpcb", __func__));
212 	KASSERT(!(toep->flags & TPF_CPL_PENDING),
213 	    ("%s: CPL pending", __func__));
214 
215 	if (toep->flags & TPF_INITIALIZED) {
216 		if (ulp_mode(toep) == ULP_MODE_TCPDDP)
217 			ddp_uninit_toep(toep);
218 		tls_uninit_toep(toep);
219 	}
220 	free(toep, M_CXGBE);
221 }
222 
223 /*
224  * Set up the socket for TCP offload.
225  */
226 void
227 offload_socket(struct socket *so, struct toepcb *toep)
228 {
229 	struct tom_data *td = toep->td;
230 	struct inpcb *inp = sotoinpcb(so);
231 	struct tcpcb *tp = intotcpcb(inp);
232 	struct sockbuf *sb;
233 
234 	INP_WLOCK_ASSERT(inp);
235 
236 	/* Update socket */
237 	sb = &so->so_snd;
238 	SOCKBUF_LOCK(sb);
239 	sb->sb_flags |= SB_NOCOALESCE;
240 	SOCKBUF_UNLOCK(sb);
241 	sb = &so->so_rcv;
242 	SOCKBUF_LOCK(sb);
243 	sb->sb_flags |= SB_NOCOALESCE;
244 	if (inp->inp_vflag & INP_IPV6)
245 		so->so_proto = &toe6_protosw;
246 	else
247 		so->so_proto = &toe_protosw;
248 	SOCKBUF_UNLOCK(sb);
249 
250 	/* Update TCP PCB */
251 	tp->tod = &td->tod;
252 	tp->t_toe = toep;
253 	tp->t_flags |= TF_TOE;
254 
255 	/* Install an extra hold on inp */
256 	toep->inp = inp;
257 	toep->flags |= TPF_ATTACHED;
258 	in_pcbref(inp);
259 }
260 
261 void
262 restore_so_proto(struct socket *so, bool v6)
263 {
264 	if (v6)
265 		so->so_proto = &tcp6_protosw;
266 	else
267 		so->so_proto = &tcp_protosw;
268 }
269 
270 /* This is _not_ the normal way to "unoffload" a socket. */
271 void
272 undo_offload_socket(struct socket *so)
273 {
274 	struct inpcb *inp = sotoinpcb(so);
275 	struct tcpcb *tp = intotcpcb(inp);
276 	struct toepcb *toep = tp->t_toe;
277 	struct sockbuf *sb;
278 
279 	INP_WLOCK_ASSERT(inp);
280 
281 	sb = &so->so_snd;
282 	SOCKBUF_LOCK(sb);
283 	sb->sb_flags &= ~SB_NOCOALESCE;
284 	SOCKBUF_UNLOCK(sb);
285 	sb = &so->so_rcv;
286 	SOCKBUF_LOCK(sb);
287 	sb->sb_flags &= ~SB_NOCOALESCE;
288 	restore_so_proto(so, inp->inp_vflag & INP_IPV6);
289 	SOCKBUF_UNLOCK(sb);
290 
291 	tp->tod = NULL;
292 	tp->t_toe = NULL;
293 	tp->t_flags &= ~TF_TOE;
294 
295 	toep->inp = NULL;
296 	toep->flags &= ~TPF_ATTACHED;
297 	if (in_pcbrele_wlocked(inp))
298 		panic("%s: inp freed.", __func__);
299 }
300 
301 static void
302 release_offload_resources(struct toepcb *toep)
303 {
304 	struct tom_data *td = toep->td;
305 	struct adapter *sc = td_adapter(td);
306 	int tid = toep->tid;
307 
308 	KASSERT(!(toep->flags & TPF_CPL_PENDING),
309 	    ("%s: %p has CPL pending.", __func__, toep));
310 
311 	CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)",
312 	    __func__, toep, tid, toep->l2te, toep->ce);
313 
314 	if (toep->l2te) {
315 		t4_l2t_release(toep->l2te);
316 		toep->l2te = NULL;
317 	}
318 	if (tid >= 0) {
319 		remove_tid(sc, tid, toep->ce ? 2 : 1);
320 		release_tid(sc, tid, toep->ctrlq);
321 		toep->tid = -1;
322 		mtx_lock(&td->toep_list_lock);
323 		if (toep->flags & TPF_IN_TOEP_LIST) {
324 			toep->flags &= ~TPF_IN_TOEP_LIST;
325 			TAILQ_REMOVE(&td->toep_list, toep, link);
326 		}
327 		mtx_unlock(&td->toep_list_lock);
328 	}
329 	if (toep->ce) {
330 		t4_release_clip_entry(sc, toep->ce);
331 		toep->ce = NULL;
332 	}
333 	if (toep->params.tc_idx != -1)
334 		t4_release_cl_rl(sc, toep->vi->pi->port_id, toep->params.tc_idx);
335 }
336 
337 /*
338  * Both the driver and kernel are done with the toepcb.
339  */
340 static void
341 done_with_toepcb(struct toepcb *toep)
342 {
343 	KASSERT(!(toep->flags & TPF_CPL_PENDING),
344 	    ("%s: %p has CPL pending.", __func__, toep));
345 	KASSERT(!(toep->flags & TPF_ATTACHED),
346 	    ("%s: %p is still attached.", __func__, toep));
347 
348 	CTR(KTR_CXGBE, "%s: toep %p (0x%x)", __func__, toep, toep->flags);
349 
350 	/*
351 	 * These queues should have been emptied at approximately the same time
352 	 * that a normal connection's socket's so_snd would have been purged or
353 	 * drained.  Do _not_ clean up here.
354 	 */
355 	MPASS(mbufq_empty(&toep->ulp_pduq));
356 	MPASS(mbufq_empty(&toep->ulp_pdu_reclaimq));
357 #ifdef INVARIANTS
358 	if (ulp_mode(toep) == ULP_MODE_TCPDDP)
359 		ddp_assert_empty(toep);
360 #endif
361 	MPASS(TAILQ_EMPTY(&toep->aiotx_jobq));
362 	MPASS(toep->tid == -1);
363 	MPASS(toep->l2te == NULL);
364 	MPASS(toep->ce == NULL);
365 	MPASS((toep->flags & TPF_IN_TOEP_LIST) == 0);
366 
367 	free_toepcb(toep);
368 }
369 
370 /*
371  * The kernel is done with the TCP PCB and this is our opportunity to unhook the
372  * toepcb hanging off of it.  If the TOE driver is also done with the toepcb (no
373  * pending CPL) then it is time to release all resources tied to the toepcb.
374  *
375  * Also gets called when an offloaded active open fails and the TOM wants the
376  * kernel to take the TCP PCB back.
377  */
378 void
379 t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
380 {
381 #if defined(KTR) || defined(INVARIANTS)
382 	struct inpcb *inp = tptoinpcb(tp);
383 #endif
384 	struct toepcb *toep = tp->t_toe;
385 
386 	INP_WLOCK_ASSERT(inp);
387 
388 	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
389 	KASSERT(toep->flags & TPF_ATTACHED,
390 	    ("%s: not attached", __func__));
391 
392 #ifdef KTR
393 	if (tp->t_state == TCPS_SYN_SENT) {
394 		CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)",
395 		    __func__, toep->tid, toep, toep->flags, inp,
396 		    inp->inp_flags);
397 	} else {
398 		CTR6(KTR_CXGBE,
399 		    "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)",
400 		    toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp,
401 		    inp->inp_flags);
402 	}
403 #endif
404 
405 	tp->tod = NULL;
406 	tp->t_toe = NULL;
407 	tp->t_flags &= ~TF_TOE;
408 	toep->flags &= ~TPF_ATTACHED;
409 
410 	if (!(toep->flags & TPF_CPL_PENDING))
411 		done_with_toepcb(toep);
412 }
413 
414 /*
415  * setsockopt handler.
416  */
417 static void
418 t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name)
419 {
420 	struct adapter *sc = tod->tod_softc;
421 	struct toepcb *toep = tp->t_toe;
422 
423 	if (dir == SOPT_GET)
424 		return;
425 
426 	CTR4(KTR_CXGBE, "%s: tp %p, dir %u, name %u", __func__, tp, dir, name);
427 
428 	switch (name) {
429 	case TCP_NODELAY:
430 		if (tp->t_state != TCPS_ESTABLISHED)
431 			break;
432 		toep->params.nagle = tp->t_flags & TF_NODELAY ? 0 : 1;
433 		t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
434 		    V_TF_NAGLE(1), V_TF_NAGLE(toep->params.nagle), 0, 0);
435 		break;
436 	default:
437 		break;
438 	}
439 }
440 
441 static inline uint64_t
442 get_tcb_tflags(const uint64_t *tcb)
443 {
444 
445 	return ((be64toh(tcb[14]) << 32) | (be64toh(tcb[15]) >> 32));
446 }
447 
448 static inline uint32_t
449 get_tcb_field(const uint64_t *tcb, u_int word, uint32_t mask, u_int shift)
450 {
451 #define LAST_WORD ((TCB_SIZE / 4) - 1)
452 	uint64_t t1, t2;
453 	int flit_idx;
454 
455 	MPASS(mask != 0);
456 	MPASS(word <= LAST_WORD);
457 	MPASS(shift < 32);
458 
459 	flit_idx = (LAST_WORD - word) / 2;
460 	if (word & 0x1)
461 		shift += 32;
462 	t1 = be64toh(tcb[flit_idx]) >> shift;
463 	t2 = 0;
464 	if (fls(mask) > 64 - shift) {
465 		/*
466 		 * Will spill over into the next logical flit, which is the flit
467 		 * before this one.  The flit_idx before this one must be valid.
468 		 */
469 		MPASS(flit_idx > 0);
470 		t2 = be64toh(tcb[flit_idx - 1]) << (64 - shift);
471 	}
472 	return ((t2 | t1) & mask);
473 #undef LAST_WORD
474 }
475 #define GET_TCB_FIELD(tcb, F) \
476     get_tcb_field(tcb, W_TCB_##F, M_TCB_##F, S_TCB_##F)
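
/*
 * Layout assumed by get_tcb_field(): the 128-byte TCB is an array of
 * sixteen 64-bit flits with the highest-numbered 32-bit words in flit 0,
 * so word 'w' lives in flit (LAST_WORD - w) / 2, in the high half when
 * 'w' is odd.  A field whose mask extends past the top of its flit spills
 * into the previous flit index.  Typical usage (the W_/M_/S_ constants
 * come from t4_tcb.h):
 *
 *	state = GET_TCB_FIELD(tcb, T_STATE);
 *	srtt = GET_TCB_FIELD(tcb, T_SRTT);
 */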
477 
478 /*
479  * Issues a CPL_GET_TCB to read the entire TCB for the tid.
480  */
481 static int
482 send_get_tcb(struct adapter *sc, u_int tid)
483 {
484 	struct cpl_get_tcb *cpl;
485 	struct wrq_cookie cookie;
486 
487 	MPASS(tid >= sc->tids.tid_base);
488 	MPASS(tid - sc->tids.tid_base < sc->tids.ntids);
489 
490 	cpl = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*cpl), 16),
491 	    &cookie);
492 	if (__predict_false(cpl == NULL))
493 		return (ENOMEM);
494 	bzero(cpl, sizeof(*cpl));
495 	INIT_TP_WR(cpl, tid);
496 	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_GET_TCB, tid));
497 	cpl->reply_ctrl = htobe16(V_REPLY_CHAN(0) |
498 	    V_QUEUENO(sc->sge.ofld_rxq[0].iq.abs_id));
499 	cpl->cookie = 0xff;
500 	commit_wrq_wr(&sc->sge.ctrlq[0], cpl, &cookie);
501 
502 	return (0);
503 }
504 
505 static struct tcb_histent *
506 alloc_tcb_histent(struct adapter *sc, u_int tid, int flags)
507 {
508 	struct tcb_histent *te;
509 
510 	MPASS(flags == M_NOWAIT || flags == M_WAITOK);
511 
512 	te = malloc(sizeof(*te), M_CXGBE, M_ZERO | flags);
513 	if (te == NULL)
514 		return (NULL);
515 	mtx_init(&te->te_lock, "TCB entry", NULL, MTX_DEF);
516 	callout_init_mtx(&te->te_callout, &te->te_lock, 0);
517 	te->te_adapter = sc;
518 	te->te_tid = tid;
519 
520 	return (te);
521 }
522 
523 static void
524 free_tcb_histent(struct tcb_histent *te)
525 {
526 
527 	mtx_destroy(&te->te_lock);
528 	free(te, M_CXGBE);
529 }
530 
531 /*
532  * Start tracking the tid in the TCB history.
533  */
534 int
535 add_tid_to_history(struct adapter *sc, u_int tid)
536 {
537 	struct tcb_histent *te = NULL;
538 	struct tom_data *td = sc->tom_softc;
539 	int rc;
540 
541 	MPASS(tid >= sc->tids.tid_base);
542 	MPASS(tid - sc->tids.tid_base < sc->tids.ntids);
543 
544 	if (td->tcb_history == NULL)
545 		return (ENXIO);
546 
547 	rw_wlock(&td->tcb_history_lock);
548 	if (td->tcb_history[tid] != NULL) {
549 		rc = EEXIST;
550 		goto done;
551 	}
552 	te = alloc_tcb_histent(sc, tid, M_NOWAIT);
553 	if (te == NULL) {
554 		rc = ENOMEM;
555 		goto done;
556 	}
557 	mtx_lock(&te->te_lock);
558 	rc = send_get_tcb(sc, tid);
559 	if (rc == 0) {
560 		te->te_flags |= TE_RPL_PENDING;
561 		td->tcb_history[tid] = te;
562 	}
563 	mtx_unlock(&te->te_lock);
564 	if (rc != 0)
565 		free_tcb_histent(te);	/* never published; also destroys te_lock */
566 done:
567 	rw_wunlock(&td->tcb_history_lock);
568 	return (rc);
569 }
570 
571 static void
572 remove_tcb_histent(struct tcb_histent *te)
573 {
574 	struct adapter *sc = te->te_adapter;
575 	struct tom_data *td = sc->tom_softc;
576 
577 	rw_assert(&td->tcb_history_lock, RA_WLOCKED);
578 	mtx_assert(&te->te_lock, MA_OWNED);
579 	MPASS(td->tcb_history[te->te_tid] == te);
580 
581 	td->tcb_history[te->te_tid] = NULL;
582 	free_tcb_histent(te);
583 	rw_wunlock(&td->tcb_history_lock);
584 }
585 
586 static inline struct tcb_histent *
587 lookup_tcb_histent(struct adapter *sc, u_int tid, bool addrem)
588 {
589 	struct tcb_histent *te;
590 	struct tom_data *td = sc->tom_softc;
591 
592 	MPASS(tid >= sc->tids.tid_base);
593 	MPASS(tid - sc->tids.tid_base < sc->tids.ntids);
594 
595 	if (td->tcb_history == NULL)
596 		return (NULL);
597 
598 	if (addrem)
599 		rw_wlock(&td->tcb_history_lock);
600 	else
601 		rw_rlock(&td->tcb_history_lock);
602 	te = td->tcb_history[tid];
603 	if (te != NULL) {
604 		mtx_lock(&te->te_lock);
605 		return (te);	/* with both locks held */
606 	}
607 	if (addrem)
608 		rw_wunlock(&td->tcb_history_lock);
609 	else
610 		rw_runlock(&td->tcb_history_lock);
611 
612 	return (te);
613 }
614 
615 static inline void
616 release_tcb_histent(struct tcb_histent *te)
617 {
618 	struct adapter *sc = te->te_adapter;
619 	struct tom_data *td = sc->tom_softc;
620 
621 	mtx_assert(&te->te_lock, MA_OWNED);
622 	mtx_unlock(&te->te_lock);
623 	rw_assert(&td->tcb_history_lock, RA_RLOCKED);
624 	rw_runlock(&td->tcb_history_lock);
625 }
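
/*
 * Usage sketch for the pair above: lookup_tcb_histent() returns with both
 * the table lock (write-locked iff addrem) and the entry's te_lock held,
 * and the caller must drop both.  Read-side callers do:
 *
 *	te = lookup_tcb_histent(sc, tid, false);
 *	if (te != NULL) {
 *		(read te->te_tcb, te->te_sample, etc.)
 *		release_tcb_histent(te);
 *	}
 *
 * The addrem == true path pairs with remove_tcb_histent() instead, which
 * frees the entry and drops the table's write lock itself.
 */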
626 
627 static void
628 request_tcb(void *arg)
629 {
630 	struct tcb_histent *te = arg;
631 
632 	mtx_assert(&te->te_lock, MA_OWNED);
633 
634 	/* No one else is supposed to update the histent. */
635 	MPASS(!(te->te_flags & TE_RPL_PENDING));
636 	if (send_get_tcb(te->te_adapter, te->te_tid) == 0)
637 		te->te_flags |= TE_RPL_PENDING;
638 	else
639 		callout_schedule(&te->te_callout, hz / 100);
640 }
641 
642 static void
643 update_tcb_histent(struct tcb_histent *te, const uint64_t *tcb)
644 {
645 	struct tom_data *td = te->te_adapter->tom_softc;
646 	uint64_t tflags = get_tcb_tflags(tcb);
647 	uint8_t sample = 0;
648 
649 	if (GET_TCB_FIELD(tcb, SND_MAX_RAW) != GET_TCB_FIELD(tcb, SND_UNA_RAW)) {
650 		if (GET_TCB_FIELD(tcb, T_RXTSHIFT) != 0)
651 			sample |= TS_RTO;
652 		if (GET_TCB_FIELD(tcb, T_DUPACKS) != 0)
653 			sample |= TS_DUPACKS;
654 		if (GET_TCB_FIELD(tcb, T_DUPACKS) >= td->dupack_threshold)
655 			sample |= TS_FASTREXMT;
656 	}
657 
658 	if (GET_TCB_FIELD(tcb, SND_MAX_RAW) != 0) {
659 		uint32_t snd_wnd;
660 
661 		sample |= TS_SND_BACKLOGGED;	/* for whatever reason. */
662 
663 		snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV);
664 		if (tflags & V_TF_RECV_SCALE(1))
665 			snd_wnd <<= GET_TCB_FIELD(tcb, RCV_SCALE);
666 		if (GET_TCB_FIELD(tcb, SND_CWND) < snd_wnd)
667 			sample |= TS_CWND_LIMITED;	/* maybe due to CWND */
668 	}
669 
670 	if (tflags & V_TF_CCTRL_ECN(1)) {
671 
672 		/*
673 		 * CE marker on incoming IP hdr, echoing ECE back in the TCP
674 		 * hdr.  Indicates congestion somewhere on the way from the peer
675 		 * to this node.
676 		 */
677 		if (tflags & V_TF_CCTRL_ECE(1))
678 			sample |= TS_ECN_ECE;
679 
680 		/*
681 		 * ECE seen and CWR sent (or about to be sent).  Might indicate
682 		 * congestion on the way to the peer.  This node is reducing its
683 		 * congestion window in response.
684 		 */
685 		if (tflags & (V_TF_CCTRL_CWR(1) | V_TF_CCTRL_RFR(1)))
686 			sample |= TS_ECN_CWR;
687 	}
688 
689 	te->te_sample[te->te_pidx] = sample;
690 	if (++te->te_pidx == nitems(te->te_sample))
691 		te->te_pidx = 0;
692 	memcpy(te->te_tcb, tcb, TCB_SIZE);
693 	te->te_flags |= TE_ACTIVE;
694 }
695 
696 static int
697 do_get_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
698 {
699 	struct adapter *sc = iq->adapter;
700 	const struct cpl_get_tcb_rpl *cpl = mtod(m, const void *);
701 	const uint64_t *tcb = (const uint64_t *)(const void *)(cpl + 1);
702 	struct tcb_histent *te;
703 	const u_int tid = GET_TID(cpl);
704 	bool remove;
705 
706 	remove = GET_TCB_FIELD(tcb, T_STATE) == TCPS_CLOSED;
707 	te = lookup_tcb_histent(sc, tid, remove);
708 	if (te == NULL) {
709 		/* Not in the history.  Who issued the GET_TCB for this? */
710 		device_printf(sc->dev, "tcb %u: flags 0x%016jx, state %u, "
711 		    "srtt %u, sscale %u, rscale %u, cookie 0x%x\n", tid,
712 		    (uintmax_t)get_tcb_tflags(tcb), GET_TCB_FIELD(tcb, T_STATE),
713 		    GET_TCB_FIELD(tcb, T_SRTT), GET_TCB_FIELD(tcb, SND_SCALE),
714 		    GET_TCB_FIELD(tcb, RCV_SCALE), cpl->cookie);
715 		goto done;
716 	}
717 
718 	MPASS(te->te_flags & TE_RPL_PENDING);
719 	te->te_flags &= ~TE_RPL_PENDING;
720 	if (remove) {
721 		remove_tcb_histent(te);
722 	} else {
723 		update_tcb_histent(te, tcb);
724 		callout_reset(&te->te_callout, hz / 10, request_tcb, te);
725 		release_tcb_histent(te);
726 	}
727 done:
728 	m_freem(m);
729 	return (0);
730 }
731 
732 static void
733 fill_tcp_info_from_tcb(struct adapter *sc, uint64_t *tcb, struct tcp_info *ti)
734 {
735 	uint32_t v;
736 
737 	ti->tcpi_state = GET_TCB_FIELD(tcb, T_STATE);
738 
739 	v = GET_TCB_FIELD(tcb, T_SRTT);
740 	ti->tcpi_rtt = tcp_ticks_to_us(sc, v);
741 
742 	v = GET_TCB_FIELD(tcb, T_RTTVAR);
743 	ti->tcpi_rttvar = tcp_ticks_to_us(sc, v);
744 
745 	ti->tcpi_snd_ssthresh = GET_TCB_FIELD(tcb, SND_SSTHRESH);
746 	ti->tcpi_snd_cwnd = GET_TCB_FIELD(tcb, SND_CWND);
747 	ti->tcpi_rcv_nxt = GET_TCB_FIELD(tcb, RCV_NXT);
748 	ti->tcpi_rcv_adv = GET_TCB_FIELD(tcb, RCV_ADV);
749 	ti->tcpi_dupacks = GET_TCB_FIELD(tcb, T_DUPACKS);
750 
751 	v = GET_TCB_FIELD(tcb, TX_MAX);
752 	ti->tcpi_snd_nxt = v - GET_TCB_FIELD(tcb, SND_NXT_RAW);
753 	ti->tcpi_snd_una = v - GET_TCB_FIELD(tcb, SND_UNA_RAW);
754 	ti->tcpi_snd_max = v - GET_TCB_FIELD(tcb, SND_MAX_RAW);
755 
756 	/* Receive window being advertised by us. */
757 	ti->tcpi_rcv_wscale = GET_TCB_FIELD(tcb, SND_SCALE);	/* Yes, SND. */
758 	ti->tcpi_rcv_space = GET_TCB_FIELD(tcb, RCV_WND);
759 
760 	/* Send window */
761 	ti->tcpi_snd_wscale = GET_TCB_FIELD(tcb, RCV_SCALE);	/* Yes, RCV. */
762 	ti->tcpi_snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV);
763 	if (get_tcb_tflags(tcb) & V_TF_RECV_SCALE(1))
764 		ti->tcpi_snd_wnd <<= ti->tcpi_snd_wscale;
765 	else
766 		ti->tcpi_snd_wscale = 0;
767 
768 }
769 
770 static void
771 fill_tcp_info_from_history(struct adapter *sc, struct tcb_histent *te,
772     struct tcp_info *ti)
773 {
774 
775 	fill_tcp_info_from_tcb(sc, te->te_tcb, ti);
776 }
777 
778 /*
779  * Reads the TCB for the given tid using a memory window and copies it to 'buf'
780  * in the same format as CPL_GET_TCB_RPL.
781  */
782 static void
783 read_tcb_using_memwin(struct adapter *sc, u_int tid, uint64_t *buf)
784 {
785 	int i, j, k, rc;
786 	uint32_t addr;
787 	u_char *tcb, tmp;
788 
789 	MPASS(tid >= sc->tids.tid_base);
790 	MPASS(tid - sc->tids.tid_base < sc->tids.ntids);
791 
792 	addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;
793 	rc = read_via_memwin(sc, 2, addr, (uint32_t *)buf, TCB_SIZE);
794 	if (rc != 0)
795 		return;
796 
797 	tcb = (u_char *)buf;
798 	for (i = 0, j = TCB_SIZE - 16; i < j; i += 16, j -= 16) {
799 		for (k = 0; k < 16; k++) {
800 			tmp = tcb[i + k];
801 			tcb[i + k] = tcb[j + k];
802 			tcb[j + k] = tmp;
803 		}
804 	}
805 }
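
/*
 * The swap loop above reverses the order of the eight 16-byte chunks in
 * place: the memory window returns the TCB in ascending address order,
 * while the CPL_GET_TCB_RPL layout expected by get_tcb_field() has the
 * chunks in the opposite order.  Schematically:
 *
 *	as read:	C0 C1 C2 C3 C4 C5 C6 C7
 *	after loop:	C7 C6 C5 C4 C3 C2 C1 C0
 */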
806 
807 static void
808 fill_tcp_info(struct adapter *sc, u_int tid, struct tcp_info *ti)
809 {
810 	uint64_t tcb[TCB_SIZE / sizeof(uint64_t)];
811 	struct tcb_histent *te;
812 
813 	ti->tcpi_toe_tid = tid;
814 	te = lookup_tcb_histent(sc, tid, false);
815 	if (te != NULL) {
816 		fill_tcp_info_from_history(sc, te, ti);
817 		release_tcb_histent(te);
818 	} else {
819 		if (!(sc->debug_flags & DF_DISABLE_TCB_CACHE)) {
820 			/* XXX: tell firmware to flush TCB cache. */
821 		}
822 		read_tcb_using_memwin(sc, tid, tcb);
823 		fill_tcp_info_from_tcb(sc, tcb, ti);
824 	}
825 }
826 
827 /*
828  * Called by the kernel to allow the TOE driver to "refine" values filled in
829  * the tcp_info for an offloaded connection.
830  */
831 static void
832 t4_tcp_info(struct toedev *tod, const struct tcpcb *tp, struct tcp_info *ti)
833 {
834 	struct adapter *sc = tod->tod_softc;
835 	struct toepcb *toep = tp->t_toe;
836 
837 	INP_LOCK_ASSERT(tptoinpcb(tp));
838 	MPASS(ti != NULL);
839 
840 	fill_tcp_info(sc, toep->tid, ti);
841 }
842 
843 #ifdef KERN_TLS
844 static int
845 t4_alloc_tls_session(struct toedev *tod, struct tcpcb *tp,
846     struct ktls_session *tls, int direction)
847 {
848 	struct toepcb *toep = tp->t_toe;
849 
850 	INP_WLOCK_ASSERT(tptoinpcb(tp));
851 	MPASS(tls != NULL);
852 
853 	return (tls_alloc_ktls(toep, tls, direction));
854 }
855 #endif
856 
857 static void
858 send_mss_flowc_wr(struct adapter *sc, struct toepcb *toep)
859 {
860 	struct wrq_cookie cookie;
861 	struct fw_flowc_wr *flowc;
862 	struct ofld_tx_sdesc *txsd;
863 	const int flowclen = sizeof(*flowc) + sizeof(struct fw_flowc_mnemval);
864 	const int flowclen16 = howmany(flowclen, 16);
865 
866 	if (toep->tx_credits < flowclen16 || toep->txsd_avail == 0) {
867 		CH_ERR(sc, "%s: tid %u out of tx credits (%d, %d).\n", __func__,
868 		    toep->tid, toep->tx_credits, toep->txsd_avail);
869 		return;
870 	}
871 
872 	flowc = start_wrq_wr(&toep->ofld_txq->wrq, flowclen16, &cookie);
873 	if (__predict_false(flowc == NULL)) {
874 		CH_ERR(sc, "ENOMEM in %s for tid %u.\n", __func__, toep->tid);
875 		return;
876 	}
877 	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
878 	    V_FW_FLOWC_WR_NPARAMS(1));
879 	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(flowclen16) |
880 	    V_FW_WR_FLOWID(toep->tid));
881 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_MSS;
882 	flowc->mnemval[0].val = htobe32(toep->params.emss);
883 
884 	txsd = &toep->txsd[toep->txsd_pidx];
885 	_Static_assert(flowclen16 <= MAX_OFLD_TX_SDESC_CREDITS,
886 	    "MAX_OFLD_TX_SDESC_CREDITS too small");
887 	txsd->tx_credits = flowclen16;
888 	txsd->plen = 0;
889 	toep->tx_credits -= txsd->tx_credits;
890 	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
891 		toep->txsd_pidx = 0;
892 	toep->txsd_avail--;
893 	commit_wrq_wr(&toep->ofld_txq->wrq, flowc, &cookie);
894 }
895 
896 static void
897 t4_pmtu_update(struct toedev *tod, struct tcpcb *tp, tcp_seq seq, int mtu)
898 {
899 	struct work_request_hdr *wrh;
900 	struct ulp_txpkt *ulpmc;
901 	int idx, len;
902 	struct wrq_cookie cookie;
903 	struct inpcb *inp = tptoinpcb(tp);
904 	struct toepcb *toep = tp->t_toe;
905 	struct adapter *sc = td_adapter(toep->td);
906 	unsigned short *mtus = &sc->params.mtus[0];
907 
908 	INP_WLOCK_ASSERT(inp);
909 	MPASS(mtu > 0);	/* kernel is supposed to provide something usable. */
910 
911 	/* tp->snd_una and snd_max are in host byte order too. */
912 	seq = be32toh(seq);
913 
914 	CTR6(KTR_CXGBE, "%s: tid %d, seq 0x%08x, mtu %u, mtu_idx %u (%d)",
915 	    __func__, toep->tid, seq, mtu, toep->params.mtu_idx,
916 	    mtus[toep->params.mtu_idx]);
917 
918 	if (ulp_mode(toep) == ULP_MODE_NONE &&	/* XXX: Read TCB otherwise? */
919 	    (SEQ_LT(seq, tp->snd_una) || SEQ_GEQ(seq, tp->snd_max))) {
920 		CTR5(KTR_CXGBE,
921 		    "%s: tid %d, seq 0x%08x not in range [0x%08x, 0x%08x).",
922 		    __func__, toep->tid, seq, tp->snd_una, tp->snd_max);
923 		return;
924 	}
925 
926 	/* Find the best mtu_idx for the suggested MTU. */
927 	for (idx = 0; idx < NMTUS - 1 && mtus[idx + 1] <= mtu; idx++)
928 		continue;
929 	if (idx >= toep->params.mtu_idx)
930 		return;	/* Never increase the PMTU (just like the kernel). */
931 
932 	/*
933 	 * We'll send a compound work request with 2 SET_TCB_FIELDs -- the first
934 	 * one updates the mtu_idx and the second one triggers a retransmit.
935 	 */
936 	len = sizeof(*wrh) + 2 * roundup2(LEN__SET_TCB_FIELD_ULP, 16);
937 	wrh = start_wrq_wr(toep->ctrlq, howmany(len, 16), &cookie);
938 	if (wrh == NULL) {
939 		CH_ERR(sc, "failed to change mtu_idx of tid %d (%u -> %u).\n",
940 		    toep->tid, toep->params.mtu_idx, idx);
941 		return;
942 	}
943 	INIT_ULPTX_WRH(wrh, len, 1, 0);	/* atomic */
944 	ulpmc = (struct ulp_txpkt *)(wrh + 1);
945 	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_T_MAXSEG,
946 	    V_TCB_T_MAXSEG(M_TCB_T_MAXSEG), V_TCB_T_MAXSEG(idx));
947 	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_TIMESTAMP,
948 	    V_TCB_TIMESTAMP(0x7FFFFULL << 11), 0);
949 	commit_wrq_wr(toep->ctrlq, wrh, &cookie);
950 
951 	/* Update the software toepcb and tcpcb. */
952 	toep->params.mtu_idx = idx;
953 	tp->t_maxseg = mtus[toep->params.mtu_idx];
954 	if (inp->inp_inc.inc_flags & INC_ISIPV6)
955 		tp->t_maxseg -= sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
956 	else
957 		tp->t_maxseg -= sizeof(struct ip) + sizeof(struct tcphdr);
958 	toep->params.emss = tp->t_maxseg;
959 	if (tp->t_flags & TF_RCVD_TSTMP)
960 		toep->params.emss -= TCPOLEN_TSTAMP_APPA;
961 
962 	/* Update the firmware flowc. */
963 	send_mss_flowc_wr(sc, toep);
964 
965 	/* Update the MTU in the kernel's hostcache. */
966 	if (sc->tt.update_hc_on_pmtu_change != 0) {
967 		struct in_conninfo inc = {0};
968 
969 		inc.inc_fibnum = inp->inp_inc.inc_fibnum;
970 		if (inp->inp_inc.inc_flags & INC_ISIPV6) {
971 			inc.inc_flags |= INC_ISIPV6;
972 			inc.inc6_faddr = inp->inp_inc.inc6_faddr;
973 		} else {
974 			inc.inc_faddr = inp->inp_inc.inc_faddr;
975 		}
976 		tcp_hc_updatemtu(&inc, mtu);
977 	}
978 
979 	CTR6(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u), t_maxseg %u, emss %u",
980 	    __func__, toep->tid, toep->params.mtu_idx,
981 	    mtus[toep->params.mtu_idx], tp->t_maxseg, toep->params.emss);
982 }
983 
984 /*
985  * The TOE driver will not receive any more CPLs for the tid associated with the
986  * toepcb; release the hold on the inpcb.
987  */
988 void
989 final_cpl_received(struct toepcb *toep)
990 {
991 	struct inpcb *inp = toep->inp;
992 	bool need_wakeup;
993 
994 	KASSERT(inp != NULL, ("%s: inp is NULL", __func__));
995 	INP_WLOCK_ASSERT(inp);
996 	KASSERT(toep->flags & TPF_CPL_PENDING,
997 	    ("%s: CPL not pending already?", __func__));
998 
999 	CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)",
1000 	    __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags);
1001 
1002 	if (ulp_mode(toep) == ULP_MODE_TCPDDP)
1003 		release_ddp_resources(toep);
1004 	toep->inp = NULL;
1005 	need_wakeup = (toep->flags & TPF_WAITING_FOR_FINAL) != 0;
1006 	toep->flags &= ~(TPF_CPL_PENDING | TPF_WAITING_FOR_FINAL);
1007 	mbufq_drain(&toep->ulp_pduq);
1008 	mbufq_drain(&toep->ulp_pdu_reclaimq);
1009 	release_offload_resources(toep);
1010 	if (!(toep->flags & TPF_ATTACHED))
1011 		done_with_toepcb(toep);
1012 
1013 	if (!in_pcbrele_wlocked(inp))
1014 		INP_WUNLOCK(inp);
1015 
1016 	if (need_wakeup) {
1017 		struct mtx *lock = mtx_pool_find(mtxpool_sleep, toep);
1018 
1019 		mtx_lock(lock);
1020 		wakeup(toep);
1021 		mtx_unlock(lock);
1022 	}
1023 }
1024 
1025 void
1026 insert_tid(struct adapter *sc, int tid, void *ctx, int ntids)
1027 {
1028 	struct tid_info *t = &sc->tids;
1029 
1030 	MPASS(tid >= t->tid_base);
1031 	MPASS(tid - t->tid_base < t->ntids);
1032 
1033 	t->tid_tab[tid - t->tid_base] = ctx;
1034 	atomic_add_int(&t->tids_in_use, ntids);
1035 }
1036 
1037 void *
1038 lookup_tid(struct adapter *sc, int tid)
1039 {
1040 	struct tid_info *t = &sc->tids;
1041 
1042 	return (t->tid_tab[tid - t->tid_base]);
1043 }
1044 
1045 void
1046 update_tid(struct adapter *sc, int tid, void *ctx)
1047 {
1048 	struct tid_info *t = &sc->tids;
1049 
1050 	t->tid_tab[tid - t->tid_base] = ctx;
1051 }
1052 
1053 void
1054 remove_tid(struct adapter *sc, int tid, int ntids)
1055 {
1056 	struct tid_info *t = &sc->tids;
1057 
1058 	t->tid_tab[tid - t->tid_base] = NULL;
1059 	atomic_subtract_int(&t->tids_in_use, ntids);
1060 }
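
/*
 * Sketch of how the three helpers above are used over a tid's lifetime
 * (allocation of the tid itself happens elsewhere, in the active and
 * passive open paths):
 *
 *	insert_tid(sc, tid, toep, ntids);	(publish ctx for the tid)
 *	...
 *	toep = lookup_tid(sc, tid);		(e.g. on CPL dispatch)
 *	...
 *	remove_tid(sc, tid, ntids);		(unpublish, adjust count)
 *
 * ntids is 2 when a CLIP entry is in use (see the toep->ce check in
 * release_offload_resources()), as such connections consume two tid
 * slots in the hardware.
 */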
1061 
1062 /*
1063  * What mtu_idx to use, given a 4-tuple.  Note that both s->mss and tcp_mssopt
1064  * have the MSS that we should advertise in our SYN.  Advertised MSS doesn't
1065  * account for any TCP options so the effective MSS (only payload, no headers or
1066  * options) could be different.
1067  */
1068 static int
1069 find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc,
1070     struct offload_settings *s)
1071 {
1072 	unsigned short *mtus = &sc->params.mtus[0];
1073 	int i, mss, mtu;
1074 
1075 	MPASS(inc != NULL);
1076 
1077 	mss = s->mss > 0 ? s->mss : tcp_mssopt(inc);
1078 	if (inc->inc_flags & INC_ISIPV6)
1079 		mtu = mss + sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
1080 	else
1081 		mtu = mss + sizeof(struct ip) + sizeof(struct tcphdr);
1082 
1083 	for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mtu; i++)
1084 		continue;
1085 
1086 	return (i);
1087 }
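
/*
 * Worked example with a hypothetical MTU table: if mtus[] were
 * { 88, 512, 1280, 1488, 1500, 2002, ... }, an IPv4 peer MSS of 1460
 * gives mtu = 1460 + sizeof(struct ip) + sizeof(struct tcphdr) = 1500,
 * and the scan stops at the index of 1500 (the largest entry <= mtu).
 * An MSS of 1400 (mtu = 1440) would select the index of 1280 instead,
 * since 1488 already exceeds 1440.
 */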
1088 
1089 /*
1090  * Determine the receive window size for a socket.
1091  */
1092 u_long
1093 select_rcv_wnd(struct socket *so)
1094 {
1095 	unsigned long wnd;
1096 
1097 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1098 
1099 	wnd = sbspace(&so->so_rcv);
1100 	if (wnd < MIN_RCV_WND)
1101 		wnd = MIN_RCV_WND;
1102 
1103 	return min(wnd, MAX_RCV_WND);
1104 }
1105 
1106 int
1107 select_rcv_wscale(void)
1108 {
1109 	int wscale = 0;
1110 	unsigned long space = sb_max;
1111 
1112 	if (space > MAX_RCV_WND)
1113 		space = MAX_RCV_WND;
1114 
1115 	while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space)
1116 		wscale++;
1117 
1118 	return (wscale);
1119 }
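
/*
 * For example, with the usual sb_max default of 2MB (2097152 bytes) and
 * assuming MAX_RCV_WND does not clamp it lower, the loop above settles
 * on wscale = 6: 65535 << 5 = 2097120 still falls short of 2MB, while
 * 65535 << 6 covers it.
 */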
1120 
1121 __be64
1122 calc_options0(struct vi_info *vi, struct conn_params *cp)
1123 {
1124 	uint64_t opt0 = 0;
1125 
1126 	opt0 |= F_TCAM_BYPASS;
1127 
1128 	MPASS(cp->wscale >= 0 && cp->wscale <= M_WND_SCALE);
1129 	opt0 |= V_WND_SCALE(cp->wscale);
1130 
1131 	MPASS(cp->mtu_idx >= 0 && cp->mtu_idx < NMTUS);
1132 	opt0 |= V_MSS_IDX(cp->mtu_idx);
1133 
1134 	MPASS(cp->ulp_mode >= 0 && cp->ulp_mode <= M_ULP_MODE);
1135 	opt0 |= V_ULP_MODE(cp->ulp_mode);
1136 
1137 	MPASS(cp->opt0_bufsize >= 0 && cp->opt0_bufsize <= M_RCV_BUFSIZ);
1138 	opt0 |= V_RCV_BUFSIZ(cp->opt0_bufsize);
1139 
1140 	MPASS(cp->l2t_idx >= 0 && cp->l2t_idx < vi->adapter->vres.l2t.size);
1141 	opt0 |= V_L2T_IDX(cp->l2t_idx);
1142 
1143 	opt0 |= V_SMAC_SEL(vi->smt_idx);
1144 	opt0 |= V_TX_CHAN(vi->pi->tx_chan);
1145 
1146 	MPASS(cp->keepalive == 0 || cp->keepalive == 1);
1147 	opt0 |= V_KEEP_ALIVE(cp->keepalive);
1148 
1149 	MPASS(cp->nagle == 0 || cp->nagle == 1);
1150 	opt0 |= V_NAGLE(cp->nagle);
1151 
1152 	return (htobe64(opt0));
1153 }
1154 
1155 __be32
1156 calc_options2(struct vi_info *vi, struct conn_params *cp)
1157 {
1158 	uint32_t opt2 = 0;
1159 	struct port_info *pi = vi->pi;
1160 	struct adapter *sc = pi->adapter;
1161 
1162 	/*
1163 	 * rx flow control, rx coalesce, congestion control, and tx pace are all
1164 	 * explicitly set by the driver.  On T5+ the ISS is also set by the
1165 	 * driver to the value picked by the kernel.
1166 	 */
1167 	if (is_t4(sc)) {
1168 		opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID;
1169 		opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID;
1170 	} else {
1171 		opt2 |= F_T5_OPT_2_VALID;	/* all 4 valid */
1172 		opt2 |= F_T5_ISS;		/* ISS provided in CPL */
1173 	}
1174 
1175 	MPASS(cp->sack == 0 || cp->sack == 1);
1176 	opt2 |= V_SACK_EN(cp->sack);
1177 
1178 	MPASS(cp->tstamp == 0 || cp->tstamp == 1);
1179 	opt2 |= V_TSTAMPS_EN(cp->tstamp);
1180 
1181 	if (cp->wscale > 0)
1182 		opt2 |= F_WND_SCALE_EN;
1183 
1184 	MPASS(cp->ecn == 0 || cp->ecn == 1);
1185 	opt2 |= V_CCTRL_ECN(cp->ecn);
1186 
1187 	opt2 |= V_TX_QUEUE(TX_MODQ(pi->tx_chan));
1188 	opt2 |= V_PACE(0);
1189 	opt2 |= F_RSS_QUEUE_VALID;
1190 	opt2 |= V_RSS_QUEUE(sc->sge.ofld_rxq[cp->rxq_idx].iq.abs_id);
1191 	if (chip_id(sc) <= CHELSIO_T6) {
1192 		MPASS(pi->rx_chan == 0 || pi->rx_chan == 1);
1193 		opt2 |= V_RX_CHANNEL(pi->rx_chan);
1194 	}
1195 
1196 	MPASS(cp->cong_algo >= 0 && cp->cong_algo <= M_CONG_CNTRL);
1197 	opt2 |= V_CONG_CNTRL(cp->cong_algo);
1198 
1199 	MPASS(cp->rx_coalesce == 0 || cp->rx_coalesce == 1);
1200 	if (cp->rx_coalesce == 1)
1201 		opt2 |= V_RX_COALESCE(M_RX_COALESCE);
1202 
1203 	opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0);
1204 	MPASS(cp->ulp_mode != ULP_MODE_TCPDDP);
1205 
1206 	return (htobe32(opt2));
1207 }
1208 
1209 uint64_t
1210 select_ntuple(struct vi_info *vi, struct l2t_entry *e)
1211 {
1212 	struct adapter *sc = vi->adapter;
1213 	struct tp_params *tp = &sc->params.tp;
1214 	uint64_t ntuple = 0;
1215 
1216 	/*
1217 	 * Initialize each of the fields which we care about which are present
1218 	 * in the Compressed Filter Tuple.
1219 	 */
1220 	if (tp->vlan_shift >= 0 && EVL_VLANOFTAG(e->vlan) != CPL_L2T_VLAN_NONE)
1221 		ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;
1222 
1223 	if (tp->port_shift >= 0)
1224 		ntuple |= (uint64_t)e->lport << tp->port_shift;
1225 
1226 	if (tp->protocol_shift >= 0)
1227 		ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;
1228 
1229 	if (tp->vnic_shift >= 0 && tp->vnic_mode == FW_VNIC_MODE_PF_VF) {
1230 		ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vi->vin) |
1231 		    V_FT_VNID_ID_PF(sc->pf) | V_FT_VNID_ID_VLD(vi->vfvld)) <<
1232 		    tp->vnic_shift;
1233 	}
1234 
1235 	if (is_t4(sc))
1236 		return (htobe32((uint32_t)ntuple));
1237 	else
1238 		return (htobe64(V_FILTER_TUPLE(ntuple)));
1239 }
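
/*
 * The value returned above is already in the width and byte order the
 * hardware expects: a 32-bit compressed filter tuple on T4, and a
 * V_FILTER_TUPLE-shifted 64-bit value on T5 and later.  The active open
 * path stores it directly into the CPL, along the lines of:
 *
 *	cpl->opt0 = calc_options0(vi, &cp);
 *	cpl->params = select_ntuple(vi, toep->l2te);
 */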
1240 
1241 /*
1242  * Initialize various connection parameters.
1243  */
1244 void
1245 init_conn_params(struct vi_info *vi , struct offload_settings *s,
1246     struct in_conninfo *inc, struct socket *so,
1247     const struct tcp_options *tcpopt, int16_t l2t_idx, struct conn_params *cp)
1248 {
1249 	struct port_info *pi = vi->pi;
1250 	struct adapter *sc = pi->adapter;
1251 	struct tom_tunables *tt = &sc->tt;
1252 	struct inpcb *inp = sotoinpcb(so);
1253 	struct tcpcb *tp = intotcpcb(inp);
1254 	u_long wnd;
1255 	u_int q_idx;
1256 
1257 	MPASS(s->offload != 0);
1258 
1259 	/* Congestion control algorithm */
1260 	if (s->cong_algo >= 0)
1261 		cp->cong_algo = s->cong_algo & M_CONG_CNTRL;
1262 	else if (sc->tt.cong_algorithm >= 0)
1263 		cp->cong_algo = tt->cong_algorithm & M_CONG_CNTRL;
1264 	else {
1265 		struct cc_algo *cc = CC_ALGO(tp);
1266 
1267 		if (strcasecmp(cc->name, "reno") == 0)
1268 			cp->cong_algo = CONG_ALG_RENO;
1269 		else if (strcasecmp(cc->name, "tahoe") == 0)
1270 			cp->cong_algo = CONG_ALG_TAHOE;
1271 		else if (strcasecmp(cc->name, "newreno") == 0)
1272 			cp->cong_algo = CONG_ALG_NEWRENO;
1273 		else if (strcasecmp(cc->name, "highspeed") == 0)
1274 			cp->cong_algo = CONG_ALG_HIGHSPEED;
1275 		else {
1276 			/*
1277 			 * Use newreno in case the algorithm selected by the
1278 			 * host stack is not supported by the hardware.
1279 			 */
1280 			cp->cong_algo = CONG_ALG_NEWRENO;
1281 		}
1282 	}
1283 
1284 	/* Tx traffic scheduling class. */
1285 	if (s->sched_class >= 0 && s->sched_class < sc->params.nsched_cls)
1286 		cp->tc_idx = s->sched_class;
1287 	else
1288 		cp->tc_idx = -1;
1289 
1290 	/* Nagle's algorithm. */
1291 	if (s->nagle >= 0)
1292 		cp->nagle = s->nagle > 0 ? 1 : 0;
1293 	else
1294 		cp->nagle = tp->t_flags & TF_NODELAY ? 0 : 1;
1295 
1296 	/* TCP Keepalive. */
1297 	if (V_tcp_always_keepalive || so_options_get(so) & SO_KEEPALIVE)
1298 		cp->keepalive = 1;
1299 	else
1300 		cp->keepalive = 0;
1301 
1302 	/* Optimization that's specific to T5 @ 40G. */
1303 	if (tt->tx_align >= 0)
1304 		cp->tx_align =  tt->tx_align > 0 ? 1 : 0;
1305 	else if (chip_id(sc) == CHELSIO_T5 &&
1306 	    (port_top_speed(pi) > 10 || sc->params.nports > 2))
1307 		cp->tx_align = 1;
1308 	else
1309 		cp->tx_align = 0;
1310 
1311 	/* ULP mode. */
1312 	cp->ulp_mode = ULP_MODE_NONE;
1313 
1314 	/* Rx coalescing. */
1315 	if (s->rx_coalesce >= 0)
1316 		cp->rx_coalesce = s->rx_coalesce > 0 ? 1 : 0;
1317 	else if (tt->rx_coalesce >= 0)
1318 		cp->rx_coalesce = tt->rx_coalesce > 0 ? 1 : 0;
1319 	else
1320 		cp->rx_coalesce = 1;	/* default */
1321 
1322 	/*
1323 	 * Index in the PMTU table.  This controls the MSS that we announce in
1324 	 * our SYN initially, but after ESTABLISHED it controls the MSS that we
1325 	 * use to send data.
1326 	 */
1327 	cp->mtu_idx = find_best_mtu_idx(sc, inc, s);
1328 
1329 	/* Tx queue for this connection. */
1330 	if (s->txq == QUEUE_RANDOM)
1331 		q_idx = arc4random();
1332 	else if (s->txq == QUEUE_ROUNDROBIN)
1333 		q_idx = atomic_fetchadd_int(&vi->txq_rr, 1);
1334 	else
1335 		q_idx = s->txq;
1336 	cp->txq_idx = vi->first_ofld_txq + q_idx % vi->nofldtxq;
1337 
1338 	/* Rx queue for this connection. */
1339 	if (s->rxq == QUEUE_RANDOM)
1340 		q_idx = arc4random();
1341 	else if (s->rxq == QUEUE_ROUNDROBIN)
1342 		q_idx = atomic_fetchadd_int(&vi->rxq_rr, 1);
1343 	else
1344 		q_idx = s->rxq;
1345 	cp->rxq_idx = vi->first_ofld_rxq + q_idx % vi->nofldrxq;
1346 
1347 	if (SOLISTENING(so)) {
1348 		/* Passive open */
1349 		MPASS(tcpopt != NULL);
1350 
1351 		/* TCP timestamp option */
1352 		if (tcpopt->tstamp &&
1353 		    (s->tstamp > 0 || (s->tstamp < 0 && V_tcp_do_rfc1323)))
1354 			cp->tstamp = 1;
1355 		else
1356 			cp->tstamp = 0;
1357 
1358 		/* SACK */
1359 		if (tcpopt->sack &&
1360 		    (s->sack > 0 || (s->sack < 0 && V_tcp_do_sack)))
1361 			cp->sack = 1;
1362 		else
1363 			cp->sack = 0;
1364 
1365 		/* Receive window scaling. */
1366 		if (tcpopt->wsf > 0 && tcpopt->wsf < 15 && V_tcp_do_rfc1323)
1367 			cp->wscale = select_rcv_wscale();
1368 		else
1369 			cp->wscale = 0;
1370 
1371 		/* ECN */
1372 		if (tcpopt->ecn &&	/* XXX: review. */
1373 		    (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn)))
1374 			cp->ecn = 1;
1375 		else
1376 			cp->ecn = 0;
1377 
1378 		wnd = max(so->sol_sbrcv_hiwat, MIN_RCV_WND);
1379 		cp->opt0_bufsize = min(wnd >> 10, M_RCV_BUFSIZ);
1380 
1381 		if (tt->sndbuf > 0)
1382 			cp->sndbuf = tt->sndbuf;
1383 		else if (so->sol_sbsnd_flags & SB_AUTOSIZE &&
1384 		    V_tcp_do_autosndbuf)
1385 			cp->sndbuf = 256 * 1024;
1386 		else
1387 			cp->sndbuf = so->sol_sbsnd_hiwat;
1388 	} else {
1389 		/* Active open */
1390 
1391 		/* TCP timestamp option */
1392 		if (s->tstamp > 0 ||
1393 		    (s->tstamp < 0 && (tp->t_flags & TF_REQ_TSTMP)))
1394 			cp->tstamp = 1;
1395 		else
1396 			cp->tstamp = 0;
1397 
1398 		/* SACK */
1399 		if (s->sack > 0 ||
1400 		    (s->sack < 0 && (tp->t_flags & TF_SACK_PERMIT)))
1401 			cp->sack = 1;
1402 		else
1403 			cp->sack = 0;
1404 
1405 		/* Receive window scaling */
1406 		if (tp->t_flags & TF_REQ_SCALE)
1407 			cp->wscale = select_rcv_wscale();
1408 		else
1409 			cp->wscale = 0;
1410 
1411 		/* ECN */
1412 		if (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn == 1))
1413 			cp->ecn = 1;
1414 		else
1415 			cp->ecn = 0;
1416 
1417 		SOCKBUF_LOCK(&so->so_rcv);
1418 		wnd = max(select_rcv_wnd(so), MIN_RCV_WND);
1419 		SOCKBUF_UNLOCK(&so->so_rcv);
1420 		cp->opt0_bufsize = min(wnd >> 10, M_RCV_BUFSIZ);
1421 
1422 		if (tt->sndbuf > 0)
1423 			cp->sndbuf = tt->sndbuf;
1424 		else {
1425 			SOCKBUF_LOCK(&so->so_snd);
1426 			if (so->so_snd.sb_flags & SB_AUTOSIZE &&
1427 			    V_tcp_do_autosndbuf)
1428 				cp->sndbuf = 256 * 1024;
1429 			else
1430 				cp->sndbuf = so->so_snd.sb_hiwat;
1431 			SOCKBUF_UNLOCK(&so->so_snd);
1432 		}
1433 	}
1434 
1435 	cp->l2t_idx = l2t_idx;
1436 
1437 	/* This will be initialized on ESTABLISHED. */
1438 	cp->emss = 0;
1439 }
1440 
1441 int
1442 negative_advice(int status)
1443 {
1444 
1445 	return (status == CPL_ERR_RTX_NEG_ADVICE ||
1446 	    status == CPL_ERR_PERSIST_NEG_ADVICE ||
1447 	    status == CPL_ERR_KEEPALV_NEG_ADVICE);
1448 }
1449 
1450 static int
1451 alloc_tid_tab(struct adapter *sc)
1452 {
1453 	struct tid_info *t = &sc->tids;
1454 
1455 	MPASS(t->ntids > 0);
1456 	MPASS(t->tid_tab == NULL);
1457 
1458 	t->tid_tab = malloc(t->ntids * sizeof(*t->tid_tab), M_CXGBE,
1459 	    M_ZERO | M_NOWAIT);
1460 	if (t->tid_tab == NULL)
1461 		return (ENOMEM);
1462 	atomic_store_rel_int(&t->tids_in_use, 0);
1463 
1464 	return (0);
1465 }
1466 
1467 static void
1468 free_tid_tab(struct adapter *sc)
1469 {
1470 	struct tid_info *t = &sc->tids;
1471 
1472 	KASSERT(t->tids_in_use == 0,
1473 	    ("%s: %d tids still in use.", __func__, t->tids_in_use));
1474 
1475 	free(t->tid_tab, M_CXGBE);
1476 	t->tid_tab = NULL;
1477 }
1478 
1479 static void
1480 free_tid_tabs(struct adapter *sc)
1481 {
1482 	free_tid_tab(sc);
1483 	free_stid_tab(sc);
1484 }
1485 
1486 static int
1487 alloc_tid_tabs(struct adapter *sc)
1488 {
1489 	int rc;
1490 
1491 	rc = alloc_tid_tab(sc);
1492 	if (rc != 0)
1493 		goto failed;
1494 
1495 	rc = alloc_stid_tab(sc);
1496 	if (rc != 0)
1497 		goto failed;
1498 
1499 	return (0);
1500 failed:
1501 	free_tid_tabs(sc);
1502 	return (rc);
1503 }
1504 
1505 static inline void
1506 alloc_tcb_history(struct adapter *sc, struct tom_data *td)
1507 {
1508 
1509 	if (sc->tids.ntids == 0 || sc->tids.ntids > 1024)
1510 		return;
1511 	rw_init(&td->tcb_history_lock, "TCB history");
1512 	td->tcb_history = malloc(sc->tids.ntids * sizeof(*td->tcb_history),
1513 	    M_CXGBE, M_ZERO | M_NOWAIT);
1514 	td->dupack_threshold = G_DUPACKTHRESH(t4_read_reg(sc, A_TP_PARA_REG0));
1515 }
1516 
1517 static inline void
1518 free_tcb_history(struct adapter *sc, struct tom_data *td)
1519 {
1520 #ifdef INVARIANTS
1521 	int i;
1522 
1523 	if (td->tcb_history != NULL) {
1524 		for (i = 0; i < sc->tids.ntids; i++) {
1525 			MPASS(td->tcb_history[i] == NULL);
1526 		}
1527 	}
1528 #endif
1529 	free(td->tcb_history, M_CXGBE);
1530 	if (rw_initialized(&td->tcb_history_lock))
1531 		rw_destroy(&td->tcb_history_lock);
1532 }
1533 
1534 static void
1535 free_tom_data(struct adapter *sc, struct tom_data *td)
1536 {
1537 
1538 	ASSERT_SYNCHRONIZED_OP(sc);
1539 
1540 	KASSERT(TAILQ_EMPTY(&td->toep_list),
1541 	    ("%s: TOE PCB list is not empty.", __func__));
1542 	KASSERT(td->lctx_count == 0,
1543 	    ("%s: lctx hash table is not empty.", __func__));
1544 
1545 	t4_free_ppod_region(&td->pr);
1546 
1547 	if (td->listen_mask != 0)
1548 		hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);
1549 
1550 	if (mtx_initialized(&td->unsent_wr_lock))
1551 		mtx_destroy(&td->unsent_wr_lock);
1552 	if (mtx_initialized(&td->lctx_hash_lock))
1553 		mtx_destroy(&td->lctx_hash_lock);
1554 	if (mtx_initialized(&td->toep_list_lock))
1555 		mtx_destroy(&td->toep_list_lock);
1556 
1557 	free_tcb_history(sc, td);
1558 	free_tid_tabs(sc);
1559 	free(td, M_CXGBE);
1560 }
1561 
1562 static char *
1563 prepare_pkt(int open_type, uint16_t vtag, struct inpcb *inp, int *pktlen,
1564     int *buflen)
1565 {
1566 	char *pkt;
1567 	struct tcphdr *th;
1568 	int ipv6, len;
1569 	const int maxlen =
1570 	    max(sizeof(struct ether_header), sizeof(struct ether_vlan_header)) +
1571 	    max(sizeof(struct ip), sizeof(struct ip6_hdr)) +
1572 	    sizeof(struct tcphdr);
1573 
1574 	MPASS(open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN);
1575 
1576 	pkt = malloc(maxlen, M_CXGBE, M_ZERO | M_NOWAIT);
1577 	if (pkt == NULL)
1578 		return (NULL);
1579 
1580 	ipv6 = inp->inp_vflag & INP_IPV6;
1581 	len = 0;
1582 
1583 	if (EVL_VLANOFTAG(vtag) == 0xfff) {
1584 		struct ether_header *eh = (void *)pkt;
1585 
1586 		if (ipv6)
1587 			eh->ether_type = htons(ETHERTYPE_IPV6);
1588 		else
1589 			eh->ether_type = htons(ETHERTYPE_IP);
1590 
1591 		len += sizeof(*eh);
1592 	} else {
1593 		struct ether_vlan_header *evh = (void *)pkt;
1594 
1595 		evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
1596 		evh->evl_tag = htons(vtag);
1597 		if (ipv6)
1598 			evh->evl_proto = htons(ETHERTYPE_IPV6);
1599 		else
1600 			evh->evl_proto = htons(ETHERTYPE_IP);
1601 
1602 		len += sizeof(*evh);
1603 	}
1604 
1605 	if (ipv6) {
1606 		struct ip6_hdr *ip6 = (void *)&pkt[len];
1607 
1608 		ip6->ip6_vfc = IPV6_VERSION;
1609 		ip6->ip6_plen = htons(sizeof(struct tcphdr));
1610 		ip6->ip6_nxt = IPPROTO_TCP;
1611 		if (open_type == OPEN_TYPE_ACTIVE) {
1612 			ip6->ip6_src = inp->in6p_laddr;
1613 			ip6->ip6_dst = inp->in6p_faddr;
1614 		} else if (open_type == OPEN_TYPE_LISTEN) {
1615 			ip6->ip6_src = inp->in6p_laddr;
1616 			ip6->ip6_dst = ip6->ip6_src;
1617 		}
1618 
1619 		len += sizeof(*ip6);
1620 	} else {
1621 		struct ip *ip = (void *)&pkt[len];
1622 
1623 		ip->ip_v = IPVERSION;
1624 		ip->ip_hl = sizeof(*ip) >> 2;
1625 		ip->ip_tos = inp->inp_ip_tos;
1626 		ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr));
1627 		ip->ip_ttl = inp->inp_ip_ttl;
1628 		ip->ip_p = IPPROTO_TCP;
1629 		if (open_type == OPEN_TYPE_ACTIVE) {
1630 			ip->ip_src = inp->inp_laddr;
1631 			ip->ip_dst = inp->inp_faddr;
1632 		} else if (open_type == OPEN_TYPE_LISTEN) {
1633 			ip->ip_src = inp->inp_laddr;
1634 			ip->ip_dst = ip->ip_src;
1635 		}
1636 
1637 		len += sizeof(*ip);
1638 	}
1639 
1640 	th = (void *)&pkt[len];
1641 	if (open_type == OPEN_TYPE_ACTIVE) {
1642 		th->th_sport = inp->inp_lport;	/* network byte order already */
1643 		th->th_dport = inp->inp_fport;	/* ditto */
1644 	} else if (open_type == OPEN_TYPE_LISTEN) {
1645 		th->th_sport = inp->inp_lport;	/* network byte order already */
1646 		th->th_dport = th->th_sport;
1647 	}
1648 	len += sizeof(*th);
1649 
1650 	*pktlen = *buflen = len;
1651 	return (pkt);
1652 }
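
/*
 * Note that the frame built by prepare_pkt() is never transmitted.  It
 * only exists so that lookup_offload_policy() below can run the COP's
 * BPF programs against something shaped like the connection's first
 * packet.  For listeners the peer is unknown, so the local address and
 * port are simply duplicated into the destination fields.
 */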
1653 
1654 const struct offload_settings *
1655 lookup_offload_policy(struct adapter *sc, int open_type, struct mbuf *m,
1656     uint16_t vtag, struct inpcb *inp)
1657 {
1658 	const struct t4_offload_policy *op;
1659 	char *pkt;
1660 	struct offload_rule *r;
1661 	int i, matched, pktlen, buflen;
1662 	static const struct offload_settings allow_offloading_settings = {
1663 		.offload = 1,
1664 		.rx_coalesce = -1,
1665 		.cong_algo = -1,
1666 		.sched_class = -1,
1667 		.tstamp = -1,
1668 		.sack = -1,
1669 		.nagle = -1,
1670 		.ecn = -1,
1671 		.ddp = -1,
1672 		.tls = -1,
1673 		.txq = QUEUE_RANDOM,
1674 		.rxq = QUEUE_RANDOM,
1675 		.mss = -1,
1676 	};
1677 	static const struct offload_settings disallow_offloading_settings = {
1678 		.offload = 0,
1679 		/* rest is irrelevant when offload is off. */
1680 	};
1681 
1682 	rw_assert(&sc->policy_lock, RA_LOCKED);
1683 
1684 	/*
1685 	 * If there's no Connection Offloading Policy attached to the device
1686 	 * then we need to return a default static policy.  If
1687 	 * "cop_managed_offloading" is true, then we need to disallow
1688 	 * offloading until a COP is attached to the device.  Otherwise we
1689 	 * allow offloading ...
1690 	 */
1691 	op = sc->policy;
1692 	if (op == NULL) {
1693 		if (sc->tt.cop_managed_offloading)
1694 			return (&disallow_offloading_settings);
1695 		else
1696 			return (&allow_offloading_settings);
1697 	}
1698 
1699 	switch (open_type) {
1700 	case OPEN_TYPE_ACTIVE:
1701 	case OPEN_TYPE_LISTEN:
1702 		pkt = prepare_pkt(open_type, vtag, inp, &pktlen, &buflen);
1703 		break;
1704 	case OPEN_TYPE_PASSIVE:
1705 		MPASS(m != NULL);
1706 		pkt = mtod(m, char *);
1707 		MPASS(*pkt == CPL_PASS_ACCEPT_REQ);
1708 		pkt += sizeof(struct cpl_pass_accept_req);
1709 		pktlen = m->m_pkthdr.len - sizeof(struct cpl_pass_accept_req);
1710 		buflen = m->m_len - sizeof(struct cpl_pass_accept_req);
1711 		break;
1712 	default:
1713 		MPASS(0);
1714 		return (&disallow_offloading_settings);
1715 	}
1716 
1717 	if (pkt == NULL || pktlen == 0 || buflen == 0)
1718 		return (&disallow_offloading_settings);
1719 
1720 	matched = 0;
1721 	r = &op->rule[0];
1722 	for (i = 0; i < op->nrules; i++, r++) {
1723 		if (r->open_type != open_type &&
1724 		    r->open_type != OPEN_TYPE_DONTCARE) {
1725 			continue;
1726 		}
1727 		matched = bpf_filter(r->bpf_prog.bf_insns, pkt, pktlen, buflen);
1728 		if (matched)
1729 			break;
1730 	}
1731 
1732 	if (open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN)
1733 		free(pkt, M_CXGBE);
1734 
1735 	return (matched ? &r->settings : &disallow_offloading_settings);
1736 }
1737 
1738 static void
1739 reclaim_wr_resources(void *arg, int count)
1740 {
1741 	struct tom_data *td = arg;
1742 	STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list);
1743 	struct cpl_act_open_req *cpl;
1744 	u_int opcode, atid, tid;
1745 	struct wrqe *wr;
1746 	struct adapter *sc = td_adapter(td);
1747 
1748 	mtx_lock(&td->unsent_wr_lock);
1749 	STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
1750 	mtx_unlock(&td->unsent_wr_lock);
1751 
1752 	while ((wr = STAILQ_FIRST(&twr_list)) != NULL) {
1753 		STAILQ_REMOVE_HEAD(&twr_list, link);
1754 
1755 		cpl = wrtod(wr);
1756 		opcode = GET_OPCODE(cpl);
1757 
1758 		switch (opcode) {
1759 		case CPL_ACT_OPEN_REQ:
1760 		case CPL_ACT_OPEN_REQ6:
1761 			atid = G_TID_TID(be32toh(OPCODE_TID(cpl)));
1762 			CTR2(KTR_CXGBE, "%s: atid %u ", __func__, atid);
1763 			act_open_failure_cleanup(sc, lookup_atid(sc, atid),
1764 						 EHOSTUNREACH);
1765 			free(wr, M_CXGBE);
1766 			break;
1767 		case CPL_PASS_ACCEPT_RPL:
1768 			tid = GET_TID(cpl);
1769 			CTR2(KTR_CXGBE, "%s: tid %u ", __func__, tid);
1770 			synack_failure_cleanup(sc, lookup_tid(sc, tid));
1771 			free(wr, M_CXGBE);
1772 			break;
1773 		default:
1774 			log(LOG_ERR, "%s: leaked work request %p, wr_len %d, "
1775 			    "opcode %x\n", __func__, wr, wr->wr_len, opcode);
1776 			/* WR not freed here; go look at it with a debugger.  */
1777 		}
1778 	}
1779 }
1780 
1781 /*
1782  * Based on do_abort_req.  We treat an abrupt hardware stop as a connection
1783  * abort from the hardware.
1784  */
1785 static void
1786 live_tid_failure_cleanup(struct adapter *sc, struct toepcb *toep, u_int status)
1787 {
1788 	struct inpcb *inp;
1789 	struct tcpcb *tp;
1790 	struct epoch_tracker et;
1791 
1792 	MPASS(!(toep->flags & TPF_SYNQE));
1793 
1794 	inp = toep->inp;
1795 	CURVNET_SET(toep->vnet);
1796 	NET_EPOCH_ENTER(et);	/* for tcp_close */
1797 	INP_WLOCK(inp);
1798 	tp = intotcpcb(inp);
1799 	toep->flags |= TPF_ABORT_SHUTDOWN;
1800 	if ((inp->inp_flags & INP_DROPPED) == 0) {
1801 		struct socket *so = inp->inp_socket;
1802 
1803 		if (so != NULL)
1804 			so_error_set(so, status);
1805 		tp = tcp_close(tp);
1806 		if (tp == NULL)
1807 			INP_WLOCK(inp);	/* re-acquire */
1808 	}
1809 	final_cpl_received(toep);
1810 	NET_EPOCH_EXIT(et);
1811 	CURVNET_RESTORE();
1812 }
1813 
1814 static void
1815 cleanup_stranded_tids(void *arg, int count)
1816 {
1817 	TAILQ_HEAD(, toepcb) tlist = TAILQ_HEAD_INITIALIZER(tlist);
1818 	TAILQ_HEAD(, synq_entry) slist = TAILQ_HEAD_INITIALIZER(slist);
1819 	struct tom_data *td = arg;
1820 	struct adapter *sc = td_adapter(td);
1821 	struct toepcb *toep;
1822 	struct synq_entry *synqe;
1823 
1824 	/* Clean up synq entries. */
1825 	mtx_lock(&td->toep_list_lock);
1826 	TAILQ_SWAP(&td->stranded_synqe, &slist, synq_entry, link);
1827 	mtx_unlock(&td->toep_list_lock);
1828 	while ((synqe = TAILQ_FIRST(&slist)) != NULL) {
1829 		TAILQ_REMOVE(&slist, synqe, link);
1830 		MPASS(synqe->tid >= 0);	/* stale, was kept around for debug */
1831 		synqe->tid = -1;
1832 		synack_failure_cleanup(sc, synqe);
1833 	}
1834 
1835 	/* Clean up in-flight active opens. */
1836 	mtx_lock(&td->toep_list_lock);
1837 	TAILQ_SWAP(&td->stranded_atids, &tlist, toepcb, link);
1838 	mtx_unlock(&td->toep_list_lock);
1839 	while ((toep = TAILQ_FIRST(&tlist)) != NULL) {
1840 		TAILQ_REMOVE(&tlist, toep, link);
1841 		MPASS(toep->tid >= 0);	/* stale, was kept around for debug */
1842 		toep->tid = -1;
1843 		act_open_failure_cleanup(sc, toep, EHOSTUNREACH);
1844 	}
1845 
1846 	/* Clean up live connections. */
1847 	mtx_lock(&td->toep_list_lock);
1848 	TAILQ_SWAP(&td->stranded_tids, &tlist, toepcb, link);
1849 	mtx_unlock(&td->toep_list_lock);
1850 	while ((toep = TAILQ_FIRST(&tlist)) != NULL) {
1851 		TAILQ_REMOVE(&tlist, toep, link);
1852 		MPASS(toep->tid >= 0);	/* stale, was kept around for debug */
1853 		toep->tid = -1;
1854 		live_tid_failure_cleanup(sc, toep, ECONNABORTED);
1855 	}
1856 }
1857 
1858 /*
1859  * Ground control to Major TOM
1860  * Commencing countdown, engines on
1861  */
1862 static int
1863 t4_tom_activate(struct adapter *sc)
1864 {
1865 	struct tom_data *td;
1866 	struct toedev *tod;
1867 	struct vi_info *vi;
1868 	int i, rc, v;
1869 
1870 	ASSERT_SYNCHRONIZED_OP(sc);
1871 
1872 	/* per-adapter softc for TOM */
1873 	td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT);
1874 	if (td == NULL)
1875 		return (ENOMEM);
1876 
1877 	/* List of TOE PCBs and associated lock */
1878 	mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
1879 	TAILQ_INIT(&td->toep_list);
1880 	TAILQ_INIT(&td->synqe_list);
1881 	TAILQ_INIT(&td->stranded_atids);
1882 	TAILQ_INIT(&td->stranded_tids);
	TAILQ_INIT(&td->stranded_synqe);
1883 	TASK_INIT(&td->cleanup_stranded_tids, 0, cleanup_stranded_tids, td);
1884 
1885 	/* Listen context */
1886 	mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
1887 	td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
1888 	    &td->listen_mask, HASH_NOWAIT);
1889 
1890 	/* List of WRs for which L2 resolution failed */
1891 	mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF);
1892 	STAILQ_INIT(&td->unsent_wr_list);
1893 	TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td);
1894 
1895 	/* TID tables */
1896 	rc = alloc_tid_tabs(sc);
1897 	if (rc != 0)
1898 		goto done;
1899 
1900 	rc = t4_init_ppod_region(&td->pr, &sc->vres.ddp,
1901 	    t4_read_reg(sc, A_ULP_RX_TDDP_PSZ), "TDDP page pods");
1902 	if (rc != 0)
1903 		goto done;
1904 	t4_set_reg_field(sc, A_ULP_RX_TDDP_TAGMASK,
1905 	    V_TDDPTAGMASK(M_TDDPTAGMASK), td->pr.pr_tag_mask);
1906 
1907 	alloc_tcb_history(sc, td);
1908 
1909 	/* toedev ops */
1910 	tod = &td->tod;
1911 	init_toedev(tod);
1912 	tod->tod_softc = sc;
1913 	tod->tod_connect = t4_connect;
1914 	tod->tod_listen_start = t4_listen_start;
1915 	tod->tod_listen_stop = t4_listen_stop;
1916 	tod->tod_rcvd = t4_rcvd;
1917 	tod->tod_output = t4_tod_output;
1918 	tod->tod_send_rst = t4_send_rst;
1919 	tod->tod_send_fin = t4_send_fin;
1920 	tod->tod_pcb_detach = t4_pcb_detach;
1921 	tod->tod_l2_update = t4_l2_update;
1922 	tod->tod_syncache_added = t4_syncache_added;
1923 	tod->tod_syncache_removed = t4_syncache_removed;
1924 	tod->tod_syncache_respond = t4_syncache_respond;
1925 	tod->tod_offload_socket = t4_offload_socket;
1926 	tod->tod_ctloutput = t4_ctloutput;
1927 	tod->tod_tcp_info = t4_tcp_info;
1928 #ifdef KERN_TLS
1929 	tod->tod_alloc_tls_session = t4_alloc_tls_session;
1930 #endif
1931 	tod->tod_pmtu_update = t4_pmtu_update;
1932 
1933 	for_each_port(sc, i) {
1934 		for_each_vi(sc->port[i], v, vi) {
1935 			SETTOEDEV(vi->ifp, &td->tod);
1936 		}
1937 	}
1938 
1939 	sc->tom_softc = td;
1940 	register_toedev(sc->tom_softc);
1941 
1942 done:
1943 	if (rc != 0)
1944 		free_tom_data(sc, td);
1945 	return (rc);
1946 }
1947 
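/*
 * Per-adapter TOM deactivation.  Returns EBUSY while iWARP or iSCSI is
 * still active, or while any offloaded connection, listener, or unsent
 * work request remains.
 */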
1948 static int
1949 t4_tom_deactivate(struct adapter *sc)
1950 {
1951 	int rc = 0, i, v;
1952 	struct tom_data *td = sc->tom_softc;
1953 	struct vi_info *vi;
1954 
1955 	ASSERT_SYNCHRONIZED_OP(sc);
1956 
1957 	if (td == NULL)
1958 		return (0);	/* XXX. KASSERT? */
1959 
1960 	if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI))
1961 		return (EBUSY);	/* both iWARP and iSCSI rely on the TOE. */
1962 
1963 	if (sc->offload_map != 0) {
1964 		for_each_port(sc, i) {
1965 			for_each_vi(sc->port[i], v, vi) {
1966 				toe_capability(vi, false);
1967 				if_setcapenablebit(vi->ifp, 0, IFCAP_TOE);
1968 				SETTOEDEV(vi->ifp, NULL);
1969 			}
1970 		}
1971 		MPASS(sc->offload_map == 0);
1972 	}
1973 
1974 	mtx_lock(&td->toep_list_lock);
1975 	if (!TAILQ_EMPTY(&td->toep_list))
1976 		rc = EBUSY;
1977 	MPASS(TAILQ_EMPTY(&td->synqe_list));
1978 	MPASS(TAILQ_EMPTY(&td->stranded_tids));
1979 	mtx_unlock(&td->toep_list_lock);
1980 
1981 	mtx_lock(&td->lctx_hash_lock);
1982 	if (td->lctx_count > 0)
1983 		rc = EBUSY;
1984 	mtx_unlock(&td->lctx_hash_lock);
1985 
1986 	taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
1987 	taskqueue_drain(taskqueue_thread, &td->cleanup_stranded_tids);
1988 	mtx_lock(&td->unsent_wr_lock);
1989 	if (!STAILQ_EMPTY(&td->unsent_wr_list))
1990 		rc = EBUSY;
1991 	mtx_unlock(&td->unsent_wr_lock);
1992 
1993 	if (rc == 0) {
1994 		unregister_toedev(sc->tom_softc);
1995 		free_tom_data(sc, td);
1996 		sc->tom_softc = NULL;
1997 	}
1998 
1999 	return (rc);
2000 }
2001 
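/*
 * Called on an abrupt hardware stop.  Every in-use atid is removed from
 * the lookup table and its toepcb is parked on td->stranded_atids for
 * cleanup_stranded_tids to dispose of.
 */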
2002 static void
2003 stop_atids(struct adapter *sc)
2004 {
2005 	struct tom_data *td = sc->tom_softc;
2006 	struct tid_info *t = &sc->tids;
2007 	struct toepcb *toep;
2008 	int atid;
2009 
2010 	/*
2011 	 * Hashfilters and T6-KTLS are the only other users of atids but they're
2012 	 * both mutually exclusive with TOE.  That means t4_tom owns all the
2013 	 * atids in the table.
2014 	 */
2015 	MPASS(!is_hashfilter(sc));
2016 	if (is_t6(sc))
2017 		MPASS(!(sc->flags & KERN_TLS_ON));
2018 
2019 	/* New atids are not being allocated. */
2020 #ifdef INVARIANTS
2021 	mtx_lock(&t->atid_lock);
2022 	MPASS(t->atid_alloc_stopped == true);
2023 	mtx_unlock(&t->atid_lock);
2024 #endif
2025 
2026 	/*
2027 	 * In-use atids fall in one of these two categories:
2028 	 * a) Those waiting for L2 resolution before being submitted to
2029 	 *    hardware.
2030 	 * b) Those that have been submitted to hardware and are awaiting
2031 	 *    replies that will never arrive because the LLD is stopped.
2032 	 */
2033 	for (atid = 0; atid < t->natids; atid++) {
2034 		toep = lookup_atid(sc, atid);
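		/*
		 * A free atid's slot points at another slot in atid_tab
		 * itself: the free list is threaded through the table,
		 * roughly (sketch only; see the driver headers for the
		 * real definition):
		 *
		 *	union aopen_entry {
		 *		void *data;
		 *		union aopen_entry *next;
		 *	};
		 *
		 * So a "toep" that lands inside atid_tab is a free-list
		 * link, not an in-use connection, and is skipped below.
		 */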
2035 		if ((uintptr_t)toep >= (uintptr_t)&t->atid_tab[0] &&
2036 		    (uintptr_t)toep < (uintptr_t)&t->atid_tab[t->natids])
2037 			continue;
2038 		if (__predict_false(toep == NULL))
2039 			continue;
2040 		MPASS(toep->tid == atid);
2041 		MPASS(toep->incarnation == sc->incarnation);
2042 		/*
2043 		 * Take the atid out of the lookup table.  toep->tid is stale
2044 		 * after this but useful for debug.
2045 		 */
2046 		CTR(KTR_CXGBE, "%s: atid %d@%d STRANDED, removed from table",
2047 		    __func__, atid, toep->incarnation);
2048 		free_atid(sc, toep->tid);
2049 #if 0
2050 		toep->tid = -1;
2051 #endif
2052 		mtx_lock(&td->toep_list_lock);
2053 		toep->flags &= ~TPF_IN_TOEP_LIST;
2054 		TAILQ_REMOVE(&td->toep_list, toep, link);
2055 		TAILQ_INSERT_TAIL(&td->stranded_atids, toep, link);
2056 		mtx_unlock(&td->toep_list_lock);
2057 	}
2058 	MPASS(atomic_load_int(&t->atids_in_use) == 0);
2059 }
2060 
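/*
 * Strands the remaining live connections: each tid is pulled out of the
 * lookup table and the whole toep_list is then moved to td->stranded_tids
 * in one shot.
 */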
2061 static void
2062 stop_tids(struct adapter *sc)
2063 {
2064 	struct tom_data *td = sc->tom_softc;
2065 	struct toepcb *toep;
2066 #ifdef INVARIANTS
2067 	struct tid_info *t = &sc->tids;
2068 #endif
2069 
2070 	/*
2071 	 * The LLD's offload queues are stopped so do_act_establish and
2072 	 * do_pass_accept_req cannot run and insert tids in parallel with this
2073 	 * thread.  stop_stid_tab has also run and removed the synq entries'
2074 	 * tids from the table.  The only tids in the table are for connections
2075 	 * at or beyond ESTABLISHED that are still waiting for the final CPL.
2076 	 */
2077 	mtx_lock(&td->toep_list_lock);
2078 	TAILQ_FOREACH(toep, &td->toep_list, link) {
2079 		MPASS(sc->incarnation == toep->incarnation);
2080 		MPASS(toep->tid >= 0);
2081 		MPASS(toep == lookup_tid(sc, toep->tid));
2082 		/* Remove tid from the lookup table immediately. */
2083 		CTR(KTR_CXGBE, "%s: tid %d@%d STRANDED, removed from table",
2084 		    __func__, toep->tid, toep->incarnation);
2085 		remove_tid(sc, toep->tid, toep->ce ? 2 : 1);
2086 #if 0
2087 		/* toep->tid is stale now but left alone for debug. */
2088 		toep->tid = -1;
2089 #endif
2090 		/* All toepcbs on this list are bulk-moved to stranded_tids below. */
2091 		toep->flags &= ~TPF_IN_TOEP_LIST;
2092 	}
2093 	MPASS(TAILQ_EMPTY(&td->stranded_tids));
2094 	TAILQ_CONCAT(&td->stranded_tids, &td->toep_list, link);
2095 	MPASS(TAILQ_EMPTY(&td->toep_list));
2096 	mtx_unlock(&td->toep_list_lock);
2097 
2098 	MPASS(atomic_load_int(&t->tids_in_use) == 0);
2099 }
2100 
2101 /*
2102  * L2T is stable because
2103  * 1. stop_lld stopped all new allocations.
2104  * 2. stop_lld also stopped the tx wrq so nothing is enqueueing new WRs to the
2105  *    queue or to l2t_entry->wr_list.
2106  * 3. t4_l2t_update is ignoring all L2 updates.
2107  */
2108 static void
2109 stop_tom_l2t(struct adapter *sc)
2110 {
2111 	struct l2t_data *d = sc->l2t;
2112 	struct tom_data *td = sc->tom_softc;
2113 	struct l2t_entry *e;
2114 	struct wrqe *wr;
2115 	int i;
2116 
2117 	/*
2118 	 * This task cannot be enqueued because L2 state changes are not being
2119 	 * processed.  But if it's already scheduled or running then we need to
2120 	 * wait for it to cleanup the atids in the unsent_wr_list.
2121 	 * wait for it to clean up the atids in the unsent_wr_list.
2122 	taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
2123 	MPASS(STAILQ_EMPTY(&td->unsent_wr_list));
2124 
2125 	for (i = 0; i < d->l2t_size; i++) {
2126 		e = &d->l2tab[i];
2127 		mtx_lock(&e->lock);
2128 		if (e->state == L2T_STATE_VALID || e->state == L2T_STATE_STALE)
2129 			e->state = L2T_STATE_RESOLVING;
2130 		/*
2131 		 * stop_atids is going to clean up _all_ atids in use, including
2132 		 * those still pending L2 resolution.  Just discard the WRs.
2133 		 */
2134 		while ((wr = STAILQ_FIRST(&e->wr_list)) != NULL) {
2135 			STAILQ_REMOVE_HEAD(&e->wr_list, link);
2136 			free(wr, M_CXGBE);
2137 		}
2138 		mtx_unlock(&e->lock);
2139 	}
2140 }
2141 
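/*
 * Quiesce TOM after an abrupt stop of the hardware.  The order matters:
 * stop_tom_l2t runs first so that no WRs remain queued on L2T entries when
 * stop_atids strands the atids, and stop_stid_tab runs before stop_tids so
 * that only established connections are left in the tid table.
 */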
2142 static int
2143 t4_tom_stop(struct adapter *sc)
2144 {
2145 	struct tid_info *t = &sc->tids;
2146 	struct tom_data *td = sc->tom_softc;
2147 
2148 	ASSERT_SYNCHRONIZED_OP(sc);
2149 
2150 	stop_tom_l2t(sc);
2151 	if (atomic_load_int(&t->atids_in_use) > 0)
2152 		stop_atids(sc);
2153 	if (atomic_load_int(&t->stids_in_use) > 0)
2154 		stop_stid_tab(sc);
2155 	if (atomic_load_int(&t->tids_in_use) > 0)
2156 		stop_tids(sc);
2157 	taskqueue_enqueue(taskqueue_thread, &td->cleanup_stranded_tids);
2158 
2159 	/*
2160 	 * L2T and atid_tab are restarted before t4_tom_restart so this assert
2161 	 * is not valid in t4_tom_restart.  This is the next best place for it.
2162 	 */
2163 	MPASS(STAILQ_EMPTY(&td->unsent_wr_list));
2164 
2165 	return (0);
2166 }
2167 
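/*
 * Resume after the hardware has been reinitialized.  Only the stid table
 * needs attention here; the L2T and atid_tab are restarted before this
 * runs (see the comment at the end of t4_tom_stop).
 */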
2168 static int
2169 t4_tom_restart(struct adapter *sc)
2170 {
2171 	ASSERT_SYNCHRONIZED_OP(sc);
2172 
2173 	restart_stid_tab(sc);
2174 
2175 	return (0);
2176 }
2177 
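/*
 * Socket option handler for offloaded connections.  TCP_USE_DDP is handled
 * here; it is set-only and may only come from within the kernel.
 * Everything else is passed through to tcp_ctloutput.  An in-kernel
 * consumer might enable DDP receive with a sketch like this (illustrative
 * only, not a snippet from this driver):
 *
 *	struct sockopt sopt;
 *	int one = 1, error;
 *
 *	bzero(&sopt, sizeof(sopt));
 *	sopt.sopt_dir = SOPT_SET;
 *	sopt.sopt_level = IPPROTO_TCP;
 *	sopt.sopt_name = TCP_USE_DDP;
 *	sopt.sopt_val = &one;
 *	sopt.sopt_valsize = sizeof(one);
 *	sopt.sopt_td = NULL;
 *	error = sosetopt(so, &sopt);
 */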
2178 static int
2179 t4_ctloutput_tom(struct socket *so, struct sockopt *sopt)
2180 {
2181 	struct tcpcb *tp = sototcpcb(so);
2182 	struct toepcb *toep = tp->t_toe;
2183 	int error, optval;
2184 
2185 	if (sopt->sopt_level == IPPROTO_TCP && sopt->sopt_name == TCP_USE_DDP) {
2186 		if (sopt->sopt_dir != SOPT_SET)
2187 			return (EOPNOTSUPP);
2188 
2189 		if (sopt->sopt_td != NULL) {
2190 			/* Only settable by the kernel. */
2191 			return (EPERM);
2192 		}
2193 
2194 		error = sooptcopyin(sopt, &optval, sizeof(optval),
2195 		    sizeof(optval));
2196 		if (error != 0)
2197 			return (error);
2198 
2199 		if (optval != 0)
2200 			return (t4_enable_ddp_rcv(so, toep));
2201 		else
2202 			return (EOPNOTSUPP);
2203 	}
2204 	return (tcp_ctloutput(so, sopt));
2205 }
2206 
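/*
 * AIO requests on an offloaded socket are offered to the DDP receive code
 * first (when the ULP mode allows it), then to the aiotx transmit path;
 * whatever neither accepts falls back to soaio_queue_generic.
 */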
2207 static int
2208 t4_aio_queue_tom(struct socket *so, struct kaiocb *job)
2209 {
2210 	struct tcpcb *tp = sototcpcb(so);
2211 	struct toepcb *toep = tp->t_toe;
2212 	int error;
2213 
2214 	/*
2215 	 * No lock is needed as TOE sockets never change between
2216 	 * active and passive.
2217 	 */
2218 	if (SOLISTENING(so))
2219 		return (EINVAL);
2220 
2221 	if (ulp_mode(toep) == ULP_MODE_TCPDDP ||
2222 	    ulp_mode(toep) == ULP_MODE_NONE) {
2223 		error = t4_aio_queue_ddp(so, job);
2224 		/* EOPNOTSUPP means DDP declined the job; any other result is final. */
2225 		if (error != EOPNOTSUPP)
2226 			return (error);
2227 	}
2229 
2230 	if (t4_aio_queue_aiotx(so, job) != 0)
2231 		return (soaio_queue_generic(so, job));
2232 	else
2233 		return (0);
2234 }
2235 
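/*
 * Module load: register the CPL handlers used by TOM, initialize the DDP
 * and TLS subsystems, and clone the stack's TCP protosw entries with
 * TOE-aware pr_ctloutput/pr_aio_queue hooks before registering with the
 * ULD framework.
 */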
2236 static int
2237 t4_tom_mod_load(void)
2238 {
2239 	/* CPL handlers */
2240 	t4_register_cpl_handler(CPL_GET_TCB_RPL, do_get_tcb_rpl);
2241 	t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl2,
2242 	    CPL_COOKIE_TOM);
2243 	t4_init_connect_cpl_handlers();
2244 	t4_init_listen_cpl_handlers();
2245 	t4_init_cpl_io_handlers();
2246 
2247 	t4_ddp_mod_load();
2248 	t4_tls_mod_load();
2249 
2250 	bcopy(&tcp_protosw, &toe_protosw, sizeof(toe_protosw));
2251 	toe_protosw.pr_ctloutput = t4_ctloutput_tom;
2252 	toe_protosw.pr_aio_queue = t4_aio_queue_tom;
2253 
2254 	bcopy(&tcp6_protosw, &toe6_protosw, sizeof(toe6_protosw));
2255 	toe6_protosw.pr_ctloutput = t4_ctloutput_tom;
2256 	toe6_protosw.pr_aio_queue = t4_aio_queue_tom;
2257 
2258 	return (t4_register_uld(&tom_uld_info, ULD_TOM));
2259 }
2260 
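/*
 * Per-adapter helper for module unload: attempt to deactivate TOM and
 * clear *ok_to_unload on failure (e.g. when a port still has IFCAP_TOE
 * enabled).
 */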
2261 static void
2262 tom_uninit(struct adapter *sc, void *arg)
2263 {
2264 	bool *ok_to_unload = arg;
2265 
2266 	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun"))
2267 		return;
2268 
2269 	/* Try to free resources (works only if no port has IFCAP_TOE) */
2270 	if (uld_active(sc, ULD_TOM) && t4_deactivate_uld(sc, ULD_TOM) != 0)
2271 		*ok_to_unload = false;
2272 
2273 	end_synchronized_op(sc, 0);
2274 }
2275 
2276 static int
2277 t4_tom_mod_unload(void)
2278 {
2279 	bool ok_to_unload = true;
2280 
2281 	t4_iterate(tom_uninit, &ok_to_unload);
2282 	if (!ok_to_unload)
2283 		return (EBUSY);
2284 
2285 	if (t4_unregister_uld(&tom_uld_info, ULD_TOM) == EBUSY)
2286 		return (EBUSY);
2287 
2288 	t4_tls_mod_unload();
2289 	t4_ddp_mod_unload();
2290 
2291 	t4_uninit_connect_cpl_handlers();
2292 	t4_uninit_listen_cpl_handlers();
2293 	t4_uninit_cpl_io_handlers();
2294 	t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, NULL, CPL_COOKIE_TOM);
2295 	t4_register_cpl_handler(CPL_GET_TCB_RPL, NULL);
2296 
2297 	return (0);
2298 }
2299 #endif	/* TCP_OFFLOAD */
2300 
2301 static int
2302 t4_tom_modevent(module_t mod, int cmd, void *arg)
2303 {
2304 	int rc = 0;
2305 
2306 #ifdef TCP_OFFLOAD
2307 	switch (cmd) {
2308 	case MOD_LOAD:
2309 		rc = t4_tom_mod_load();
2310 		break;
2311 
2312 	case MOD_UNLOAD:
2313 		rc = t4_tom_mod_unload();
2314 		break;
2315 
2316 	default:
2317 		rc = EINVAL;
2318 	}
2319 #else
2320 	printf("t4_tom: compiled without TCP_OFFLOAD support.\n");
2321 	rc = EOPNOTSUPP;
2322 #endif
2323 	return (rc);
2324 }
2325 
2326 static moduledata_t t4_tom_moddata = {
2327 	"t4_tom",
2328 	t4_tom_modevent,
2329 	0
2330 };
2331 
2332 MODULE_VERSION(t4_tom, 1);
2333 MODULE_DEPEND(t4_tom, toecore, 1, 1, 1);
2334 MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1);
2335 DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY);
2336