/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/refcount.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/scope6_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_clip.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"
#include "tom/t4_tls.h"

static struct protosw toe_protosw;
static struct pr_usrreqs toe_usrreqs;

static struct protosw toe6_protosw;
static struct pr_usrreqs toe6_usrreqs;

/* Module ops */
static int t4_tom_mod_load(void);
static int t4_tom_mod_unload(void);
static int t4_tom_modevent(module_t, int, void *);

/* ULD ops and helpers */
static int t4_tom_activate(struct adapter *);
static int t4_tom_deactivate(struct adapter *);

static struct uld_info tom_uld_info = {
	.uld_id = ULD_TOM,
	.activate = t4_tom_activate,
	.deactivate = t4_tom_deactivate,
};

static void release_offload_resources(struct toepcb *);
static int alloc_tid_tabs(struct tid_info *);
static void free_tid_tabs(struct tid_info *);
static void free_tom_data(struct adapter *, struct tom_data *);
static void reclaim_wr_resources(void *, int);

struct toepcb *
alloc_toepcb(struct vi_info *vi, int txqid, int rxqid, int flags)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct toepcb *toep;
	int tx_credits, txsd_total, len;

	/*
	 * The firmware counts tx work request credits in units of 16 bytes
	 * each.  Reserve room for an ABORT_REQ so the driver never has to worry
	 * about tx credits if it wants to abort a connection.
	 */
	tx_credits = sc->params.ofldq_wr_cred;
	tx_credits -= howmany(sizeof(struct cpl_abort_req), 16);

	/*
	 * The shortest possible tx work request is a fw_ofld_tx_data_wr + 1
	 * byte of immediate payload, and the firmware counts tx work request
	 * credits in units of 16 bytes.  Calculate the maximum number of work
	 * requests possible.
	 */
	txsd_total = tx_credits /
	    howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16);
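
	/*
	 * Worked example (hypothetical numbers, for illustration only): if
	 * the firmware hands out ofldq_wr_cred = 546 credits and
	 * cpl_abort_req were 32 bytes, then howmany(32, 16) = 2 credits are
	 * reserved and tx_credits = 544.  If fw_ofld_tx_data_wr were 32
	 * bytes, the smallest work request would occupy howmany(32 + 1, 16)
	 * = 3 credits, giving txsd_total = 544 / 3 = 181 send descriptors.
	 */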

	KASSERT(txqid >= vi->first_ofld_txq &&
	    txqid < vi->first_ofld_txq + vi->nofldtxq,
	    ("%s: txqid %d for vi %p (first %d, n %d)", __func__, txqid, vi,
		vi->first_ofld_txq, vi->nofldtxq));

	KASSERT(rxqid >= vi->first_ofld_rxq &&
	    rxqid < vi->first_ofld_rxq + vi->nofldrxq,
	    ("%s: rxqid %d for vi %p (first %d, n %d)", __func__, rxqid, vi,
		vi->first_ofld_rxq, vi->nofldrxq));

	len = offsetof(struct toepcb, txsd) +
	    txsd_total * sizeof(struct ofld_tx_sdesc);

	toep = malloc(len, M_CXGBE, M_ZERO | flags);
	if (toep == NULL)
		return (NULL);

	refcount_init(&toep->refcount, 1);
	toep->td = sc->tom_softc;
	toep->vi = vi;
	toep->tc_idx = -1;
	toep->tx_total = tx_credits;
	toep->tx_credits = tx_credits;
	toep->ofld_txq = &sc->sge.ofld_txq[txqid];
	toep->ofld_rxq = &sc->sge.ofld_rxq[rxqid];
	toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
	mbufq_init(&toep->ulp_pduq, INT_MAX);
	mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX);
	toep->txsd_total = txsd_total;
	toep->txsd_avail = txsd_total;
	toep->txsd_pidx = 0;
	toep->txsd_cidx = 0;
	aiotx_init_toep(toep);

	return (toep);
}

struct toepcb *
hold_toepcb(struct toepcb *toep)
{

	refcount_acquire(&toep->refcount);
	return (toep);
}

void
free_toepcb(struct toepcb *toep)
{

	if (refcount_release(&toep->refcount) == 0)
		return;

	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: attached to an inpcb", __func__));
	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: CPL pending", __func__));

	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		ddp_uninit_toep(toep);
	tls_uninit_toep(toep);
	free(toep, M_CXGBE);
}

/*
 * Set up the socket for TCP offload.
 */
void
offload_socket(struct socket *so, struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	/* Update socket */
	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	if (inp->inp_vflag & INP_IPV6)
		so->so_proto = &toe6_protosw;
	else
		so->so_proto = &toe_protosw;
	SOCKBUF_UNLOCK(sb);

	/* Update TCP PCB */
	tp->tod = &td->tod;
	tp->t_toe = toep;
	tp->t_flags |= TF_TOE;

	/* Install an extra hold on inp */
	toep->inp = inp;
	toep->flags |= TPF_ATTACHED;
	in_pcbref(inp);

	/* Add the TOE PCB to the active list */
	mtx_lock(&td->toep_list_lock);
	TAILQ_INSERT_HEAD(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

/* This is _not_ the normal way to "unoffload" a socket. */
void
undo_offload_socket(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;
	struct tom_data *td = toep->td;
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);

	tp->tod = NULL;
	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;

	toep->inp = NULL;
	toep->flags &= ~TPF_ATTACHED;
	if (in_pcbrele_wlocked(inp))
		panic("%s: inp freed.", __func__);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

static void
release_offload_resources(struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct adapter *sc = td_adapter(td);
	int tid = toep->tid;

	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: %p has CPL pending.", __func__, toep));
	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: %p is still attached.", __func__, toep));

	CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)",
	    __func__, toep, tid, toep->l2te, toep->ce);

	/*
	 * These queues should have been emptied at approximately the same time
	 * that a normal connection's socket's so_snd would have been purged or
	 * drained.  Do _not_ clean up here.
	 */
	MPASS(mbufq_len(&toep->ulp_pduq) == 0);
	MPASS(mbufq_len(&toep->ulp_pdu_reclaimq) == 0);
#ifdef INVARIANTS
	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		ddp_assert_empty(toep);
#endif
	MPASS(TAILQ_EMPTY(&toep->aiotx_jobq));

	if (toep->l2te)
		t4_l2t_release(toep->l2te);

	if (tid >= 0) {
		remove_tid(sc, tid, toep->ce ? 2 : 1);
		release_tid(sc, tid, toep->ctrlq);
	}

	if (toep->ce)
		t4_release_lip(sc, toep->ce);

	if (toep->tc_idx != -1)
		t4_release_cl_rl(sc, toep->vi->pi->port_id, toep->tc_idx);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);

	free_toepcb(toep);
}

/*
 * The kernel is done with the TCP PCB and this is our opportunity to unhook the
 * toepcb hanging off of it.  If the TOE driver is also done with the toepcb (no
 * pending CPL) then it is time to release all resources tied to the toepcb.
 *
 * Also gets called when an offloaded active open fails and the TOM wants the
 * kernel to take the TCP PCB back.
 */
static void
t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
{
#if defined(KTR) || defined(INVARIANTS)
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);

	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
	KASSERT(toep->flags & TPF_ATTACHED,
	    ("%s: not attached", __func__));

#ifdef KTR
	if (tp->t_state == TCPS_SYN_SENT) {
		CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)",
		    __func__, toep->tid, toep, toep->flags, inp,
		    inp->inp_flags);
	} else {
		CTR6(KTR_CXGBE,
		    "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)",
		    toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp,
		    inp->inp_flags);
	}
#endif

	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;
	toep->flags &= ~TPF_ATTACHED;

	if (!(toep->flags & TPF_CPL_PENDING))
		release_offload_resources(toep);
}

/*
 * setsockopt handler.
 */
static void
t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = tp->t_toe;

	if (dir == SOPT_GET)
		return;

	CTR4(KTR_CXGBE, "%s: tp %p, dir %u, name %u", __func__, tp, dir, name);

	switch (name) {
	case TCP_NODELAY:
		if (tp->t_state != TCPS_ESTABLISHED)
			break;
		t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
		    V_TF_NAGLE(1), V_TF_NAGLE(tp->t_flags & TF_NODELAY ? 0 : 1),
		    0, 0);
		break;
	default:
		break;
	}
}

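/*
 * Reassemble the 64-bit t_flags field of a TCB from a CPL_GET_TCB_RPL
 * payload.  As the expression below shows, t_flags straddles two flits of
 * the reply: its upper half lives in the low 32 bits of flit 14 and its
 * lower half in the high 32 bits of flit 15.
 */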
static inline uint64_t
get_tcb_tflags(const uint64_t *tcb)
{

	return ((be64toh(tcb[14]) << 32) | (be64toh(tcb[15]) >> 32));
}

static inline uint32_t
get_tcb_field(const uint64_t *tcb, u_int word, uint32_t mask, u_int shift)
{
#define LAST_WORD ((TCB_SIZE / 4) - 1)
	uint64_t t1, t2;
	int flit_idx;

	MPASS(mask != 0);
	MPASS(word <= LAST_WORD);
	MPASS(shift < 32);

	flit_idx = (LAST_WORD - word) / 2;
	if (word & 0x1)
		shift += 32;
	t1 = be64toh(tcb[flit_idx]) >> shift;
	t2 = 0;
	if (fls(mask) > 64 - shift) {
		/*
		 * Will spill over into the next logical flit, which is the flit
		 * before this one.  The flit_idx before this one must be valid.
		 */
		MPASS(flit_idx > 0);
		t2 = be64toh(tcb[flit_idx - 1]) << (64 - shift);
	}
	return ((t2 | t1) & mask);
#undef LAST_WORD
}
#define GET_TCB_FIELD(tcb, F) \
    get_tcb_field(tcb, W_TCB_##F, M_TCB_##F, S_TCB_##F)
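
/*
 * Worked example for get_tcb_field(), assuming TCB_SIZE is 128 bytes (so
 * LAST_WORD is 31 and the TCB spans 16 flits): for a field at 32-bit word
 * 3, flit_idx = (31 - 3) / 2 = 14, and because the word index is odd the
 * field sits in the upper half of that flit, hence the extra shift of 32.
 * A field whose mask extends past bit 63 after the shift spills over into
 * flit 13 (the "next logical flit" in the comment above).
 */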

/*
 * Issues a CPL_GET_TCB to read the entire TCB for the tid.
 */
static int
send_get_tcb(struct adapter *sc, u_int tid)
{
	struct cpl_get_tcb *cpl;
	struct wrq_cookie cookie;

	MPASS(tid < sc->tids.ntids);

	cpl = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*cpl), 16),
	    &cookie);
	if (__predict_false(cpl == NULL))
		return (ENOMEM);
	bzero(cpl, sizeof(*cpl));
	INIT_TP_WR(cpl, tid);
	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_GET_TCB, tid));
	cpl->reply_ctrl = htobe16(V_REPLY_CHAN(0) |
	    V_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id));
	cpl->cookie = 0xff;
	commit_wrq_wr(&sc->sge.ctrlq[0], cpl, &cookie);

	return (0);
}

static struct tcb_histent *
alloc_tcb_histent(struct adapter *sc, u_int tid, int flags)
{
	struct tcb_histent *te;

	MPASS(flags == M_NOWAIT || flags == M_WAITOK);

	te = malloc(sizeof(*te), M_CXGBE, M_ZERO | flags);
	if (te == NULL)
		return (NULL);
	mtx_init(&te->te_lock, "TCB entry", NULL, MTX_DEF);
	callout_init_mtx(&te->te_callout, &te->te_lock, 0);
	te->te_adapter = sc;
	te->te_tid = tid;

	return (te);
}

static void
free_tcb_histent(struct tcb_histent *te)
{

	mtx_destroy(&te->te_lock);
	free(te, M_CXGBE);
}

/*
 * Start tracking the tid in the TCB history.
 */
int
add_tid_to_history(struct adapter *sc, u_int tid)
{
	struct tcb_histent *te = NULL;
	struct tom_data *td = sc->tom_softc;
	int rc;

	MPASS(tid < sc->tids.ntids);

	if (td->tcb_history == NULL)
		return (ENXIO);

	rw_wlock(&td->tcb_history_lock);
	if (td->tcb_history[tid] != NULL) {
		rc = EEXIST;
		goto done;
	}
	te = alloc_tcb_histent(sc, tid, M_NOWAIT);
	if (te == NULL) {
		rc = ENOMEM;
		goto done;
	}
	mtx_lock(&te->te_lock);
	rc = send_get_tcb(sc, tid);
	if (rc == 0) {
		te->te_flags |= TE_RPL_PENDING;
		td->tcb_history[tid] = te;
	}
	mtx_unlock(&te->te_lock);
	if (rc != 0)
		free_tcb_histent(te);	/* drop te_lock before destroying it */
done:
	rw_wunlock(&td->tcb_history_lock);
	return (rc);
}

static void
remove_tcb_histent(struct tcb_histent *te)
{
	struct adapter *sc = te->te_adapter;
	struct tom_data *td = sc->tom_softc;

	rw_assert(&td->tcb_history_lock, RA_WLOCKED);
	mtx_assert(&te->te_lock, MA_OWNED);
	MPASS(td->tcb_history[te->te_tid] == te);

	td->tcb_history[te->te_tid] = NULL;
	free_tcb_histent(te);
	rw_wunlock(&td->tcb_history_lock);
}

static inline struct tcb_histent *
lookup_tcb_histent(struct adapter *sc, u_int tid, bool addrem)
{
	struct tcb_histent *te;
	struct tom_data *td = sc->tom_softc;

	MPASS(tid < sc->tids.ntids);

	if (td->tcb_history == NULL)
		return (NULL);

	if (addrem)
		rw_wlock(&td->tcb_history_lock);
	else
		rw_rlock(&td->tcb_history_lock);
	te = td->tcb_history[tid];
	if (te != NULL) {
		mtx_lock(&te->te_lock);
		return (te);	/* with both locks held */
	}
	if (addrem)
		rw_wunlock(&td->tcb_history_lock);
	else
		rw_runlock(&td->tcb_history_lock);

	return (te);
}

static inline void
release_tcb_histent(struct tcb_histent *te)
{
	struct adapter *sc = te->te_adapter;
	struct tom_data *td = sc->tom_softc;

	mtx_assert(&te->te_lock, MA_OWNED);
	mtx_unlock(&te->te_lock);
	rw_assert(&td->tcb_history_lock, RA_RLOCKED);
	rw_runlock(&td->tcb_history_lock);
}

static void
request_tcb(void *arg)
{
	struct tcb_histent *te = arg;

	mtx_assert(&te->te_lock, MA_OWNED);

	/* No one else is supposed to update the histent. */
	MPASS(!(te->te_flags & TE_RPL_PENDING));
	if (send_get_tcb(te->te_adapter, te->te_tid) == 0)
		te->te_flags |= TE_RPL_PENDING;
	else
		callout_schedule(&te->te_callout, hz / 100);
}

static void
update_tcb_histent(struct tcb_histent *te, const uint64_t *tcb)
{
	struct tom_data *td = te->te_adapter->tom_softc;
	uint64_t tflags = get_tcb_tflags(tcb);
	uint8_t sample = 0;

	if (GET_TCB_FIELD(tcb, SND_MAX_RAW) != GET_TCB_FIELD(tcb, SND_UNA_RAW)) {
		if (GET_TCB_FIELD(tcb, T_RXTSHIFT) != 0)
			sample |= TS_RTO;
		if (GET_TCB_FIELD(tcb, T_DUPACKS) != 0)
			sample |= TS_DUPACKS;
		if (GET_TCB_FIELD(tcb, T_DUPACKS) >= td->dupack_threshold)
			sample |= TS_FASTREXMT;
	}

	if (GET_TCB_FIELD(tcb, SND_MAX_RAW) != 0) {
		uint32_t snd_wnd;

		sample |= TS_SND_BACKLOGGED;	/* for whatever reason. */

		snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV);
		if (tflags & V_TF_RECV_SCALE(1))
			snd_wnd <<= GET_TCB_FIELD(tcb, RCV_SCALE);
		if (GET_TCB_FIELD(tcb, SND_CWND) < snd_wnd)
			sample |= TS_CWND_LIMITED;	/* maybe due to CWND */
	}

	if (tflags & V_TF_CCTRL_ECN(1)) {

		/*
		 * CE marker on incoming IP hdr, echoing ECE back in the TCP
		 * hdr.  Indicates congestion somewhere on the way from the peer
		 * to this node.
		 */
		if (tflags & V_TF_CCTRL_ECE(1))
			sample |= TS_ECN_ECE;

		/*
		 * ECE seen and CWR sent (or about to be sent).  Might indicate
		 * congestion on the way to the peer.  This node is reducing its
		 * congestion window in response.
		 */
		if (tflags & (V_TF_CCTRL_CWR(1) | V_TF_CCTRL_RFR(1)))
			sample |= TS_ECN_CWR;
	}

	te->te_sample[te->te_pidx] = sample;
	if (++te->te_pidx == nitems(te->te_sample))
		te->te_pidx = 0;
	memcpy(te->te_tcb, tcb, TCB_SIZE);
	te->te_flags |= TE_ACTIVE;
}

static int
do_get_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_get_tcb_rpl *cpl = mtod(m, const void *);
	const uint64_t *tcb = (const uint64_t *)(const void *)(cpl + 1);
	struct tcb_histent *te;
	const u_int tid = GET_TID(cpl);
	bool remove;

	remove = GET_TCB_FIELD(tcb, T_STATE) == TCPS_CLOSED;
	te = lookup_tcb_histent(sc, tid, remove);
	if (te == NULL) {
		/* Not in the history.  Who issued the GET_TCB for this? */
		device_printf(sc->dev, "tcb %u: flags 0x%016jx, state %u, "
		    "srtt %u, sscale %u, rscale %u, cookie 0x%x\n", tid,
		    (uintmax_t)get_tcb_tflags(tcb), GET_TCB_FIELD(tcb, T_STATE),
		    GET_TCB_FIELD(tcb, T_SRTT), GET_TCB_FIELD(tcb, SND_SCALE),
		    GET_TCB_FIELD(tcb, RCV_SCALE), cpl->cookie);
		goto done;
	}

	MPASS(te->te_flags & TE_RPL_PENDING);
	te->te_flags &= ~TE_RPL_PENDING;
	if (remove) {
		remove_tcb_histent(te);
	} else {
		update_tcb_histent(te, tcb);
		callout_reset(&te->te_callout, hz / 10, request_tcb, te);
		release_tcb_histent(te);
	}
done:
	m_freem(m);
	return (0);
}

static void
fill_tcp_info_from_tcb(struct adapter *sc, uint64_t *tcb, struct tcp_info *ti)
{
	uint32_t v;

	ti->tcpi_state = GET_TCB_FIELD(tcb, T_STATE);

	v = GET_TCB_FIELD(tcb, T_SRTT);
	ti->tcpi_rtt = tcp_ticks_to_us(sc, v);

	v = GET_TCB_FIELD(tcb, T_RTTVAR);
	ti->tcpi_rttvar = tcp_ticks_to_us(sc, v);

	ti->tcpi_snd_ssthresh = GET_TCB_FIELD(tcb, SND_SSTHRESH);
	ti->tcpi_snd_cwnd = GET_TCB_FIELD(tcb, SND_CWND);
	ti->tcpi_rcv_nxt = GET_TCB_FIELD(tcb, RCV_NXT);

	v = GET_TCB_FIELD(tcb, TX_MAX);
	ti->tcpi_snd_nxt = v - GET_TCB_FIELD(tcb, SND_NXT_RAW);

	/* Receive window being advertised by us. */
	ti->tcpi_rcv_wscale = GET_TCB_FIELD(tcb, SND_SCALE);	/* Yes, SND. */
	ti->tcpi_rcv_space = GET_TCB_FIELD(tcb, RCV_WND);

	/* Send window */
	ti->tcpi_snd_wscale = GET_TCB_FIELD(tcb, RCV_SCALE);	/* Yes, RCV. */
	ti->tcpi_snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV);
	if (get_tcb_tflags(tcb) & V_TF_RECV_SCALE(1))
		ti->tcpi_snd_wnd <<= ti->tcpi_snd_wscale;
	else
		ti->tcpi_snd_wscale = 0;
}

static void
fill_tcp_info_from_history(struct adapter *sc, struct tcb_histent *te,
    struct tcp_info *ti)
{

	fill_tcp_info_from_tcb(sc, te->te_tcb, ti);
}

/*
 * Reads the TCB for the given tid using a memory window and copies it to 'buf'
 * in the same format as CPL_GET_TCB_RPL.
 */
static void
read_tcb_using_memwin(struct adapter *sc, u_int tid, uint64_t *buf)
{
	int i, j, k, rc;
	uint32_t addr;
	u_char *tcb, tmp;

	MPASS(tid < sc->tids.ntids);

	addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;
	rc = read_via_memwin(sc, 2, addr, (uint32_t *)buf, TCB_SIZE);
	if (rc != 0)
		return;

	tcb = (u_char *)buf;
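	/*
	 * The TCB read straight out of adapter memory has its 16-byte chunks
	 * in the opposite order from a CPL_GET_TCB_RPL payload.  Reverse the
	 * chunks in place, end for end, so that the buffer matches the reply
	 * format that GET_TCB_FIELD and friends expect (see the function
	 * comment above).
	 */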
	for (i = 0, j = TCB_SIZE - 16; i < j; i += 16, j -= 16) {
		for (k = 0; k < 16; k++) {
			tmp = tcb[i + k];
			tcb[i + k] = tcb[j + k];
			tcb[j + k] = tmp;
		}
	}
}

static void
fill_tcp_info(struct adapter *sc, u_int tid, struct tcp_info *ti)
{
	uint64_t tcb[TCB_SIZE / sizeof(uint64_t)];
	struct tcb_histent *te;

	ti->tcpi_toe_tid = tid;
	te = lookup_tcb_histent(sc, tid, false);
	if (te != NULL) {
		fill_tcp_info_from_history(sc, te, ti);
		release_tcb_histent(te);
	} else {
		if (!(sc->debug_flags & DF_DISABLE_TCB_CACHE)) {
			/* XXX: tell firmware to flush TCB cache. */
		}
		read_tcb_using_memwin(sc, tid, tcb);
		fill_tcp_info_from_tcb(sc, tcb, ti);
	}
}

/*
 * Called by the kernel to allow the TOE driver to "refine" the values filled
 * in the tcp_info for an offloaded connection.
 */
static void
t4_tcp_info(struct toedev *tod, struct tcpcb *tp, struct tcp_info *ti)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	MPASS(ti != NULL);

	fill_tcp_info(sc, toep->tid, ti);
}

/*
 * The TOE driver will not receive any more CPLs for the tid associated with the
 * toepcb; release the hold on the inpcb.
 */
void
final_cpl_received(struct toepcb *toep)
{
	struct inpcb *inp = toep->inp;

	KASSERT(inp != NULL, ("%s: inp is NULL", __func__));
	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_CPL_PENDING,
	    ("%s: CPL not pending already?", __func__));

	CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)",
	    __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags);

	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		release_ddp_resources(toep);
	toep->inp = NULL;
	toep->flags &= ~TPF_CPL_PENDING;
	mbufq_drain(&toep->ulp_pdu_reclaimq);

	if (!(toep->flags & TPF_ATTACHED))
		release_offload_resources(toep);

	if (!in_pcbrele_wlocked(inp))
		INP_WUNLOCK(inp);
}

void
insert_tid(struct adapter *sc, int tid, void *ctx, int ntids)
{
	struct tid_info *t = &sc->tids;

	MPASS(tid >= t->tid_base);
	MPASS(tid - t->tid_base < t->ntids);

	t->tid_tab[tid - t->tid_base] = ctx;
	atomic_add_int(&t->tids_in_use, ntids);
}

void *
lookup_tid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;

	return (t->tid_tab[tid - t->tid_base]);
}

void
update_tid(struct adapter *sc, int tid, void *ctx)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid - t->tid_base] = ctx;
}

void
remove_tid(struct adapter *sc, int tid, int ntids)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid - t->tid_base] = NULL;
	atomic_subtract_int(&t->tids_in_use, ntids);
}

/*
 * Returns the mtu_idx to use, given a 4-tuple.  Note that both s->mss and
 * tcp_mssopt have the MSS that we should advertise in our SYN.  The advertised
 * MSS doesn't account for any TCP options, so the effective MSS (payload only,
 * no headers or options) could be different.
 */
int
find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc,
    struct offload_settings *s)
{
	unsigned short *mtus = &sc->params.mtus[0];
	int i, mss, mtu;

	MPASS(inc != NULL);

	mss = s->mss > 0 ? s->mss : tcp_mssopt(inc);
	if (inc->inc_flags & INC_ISIPV6)
		mtu = mss + sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		mtu = mss + sizeof(struct ip) + sizeof(struct tcphdr);

	for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mtu; i++)
		continue;

	return (i);
}
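
/*
 * Example (illustrative, assuming the configured MTU table contains a
 * 1500-byte entry): an IPv4 connection where tcp_mssopt() returns 1460
 * gives mtu = 1460 + 20 + 20 = 1500, and the loop above stops at the
 * largest mtus[i] <= 1500, i.e. the index of the 1500-byte entry.
 */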

/*
 * Determine the receive window size for a socket.
 */
u_long
select_rcv_wnd(struct socket *so)
{
	unsigned long wnd;

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	wnd = sbspace(&so->so_rcv);
	if (wnd < MIN_RCV_WND)
		wnd = MIN_RCV_WND;

	return min(wnd, MAX_RCV_WND);
}

int
select_rcv_wscale(void)
{
	int wscale = 0;
	unsigned long space = sb_max;

	if (space > MAX_RCV_WND)
		space = MAX_RCV_WND;

	while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space)
		wscale++;

	return (wscale);
}
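
/*
 * Worked example (assuming the common kern.ipc.maxsockbuf default of 2 MB
 * and MAX_RCV_WND >= 2 MB): space = 2097152, and the loop runs while
 * 65535 << wscale < 2097152, stopping at wscale = 6 because
 * 65535 << 5 = 2097120 is still less than 2 MB.
 */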

/*
 * The socket 'so' could be a listening socket too.
 */
uint64_t
calc_opt0(struct socket *so, struct vi_info *vi, struct l2t_entry *e,
    int mtu_idx, int rscale, int rx_credits, int ulp_mode,
    struct offload_settings *s)
{
	int keepalive;
	uint64_t opt0;

	MPASS(so != NULL);
	MPASS(vi != NULL);
	KASSERT(rx_credits <= M_RCV_BUFSIZ,
	    ("%s: rcv_bufsiz too high", __func__));

	opt0 = F_TCAM_BYPASS | V_WND_SCALE(rscale) | V_MSS_IDX(mtu_idx) |
	    V_ULP_MODE(ulp_mode) | V_RCV_BUFSIZ(rx_credits) |
	    V_L2T_IDX(e->idx) | V_SMAC_SEL(vi->smt_idx) |
	    V_TX_CHAN(vi->pi->tx_chan);

	keepalive = tcp_always_keepalive || so_options_get(so) & SO_KEEPALIVE;
	opt0 |= V_KEEP_ALIVE(keepalive != 0);

	if (s->nagle < 0) {
		struct inpcb *inp = sotoinpcb(so);
		struct tcpcb *tp = intotcpcb(inp);

		opt0 |= V_NAGLE((tp->t_flags & TF_NODELAY) == 0);
	} else
		opt0 |= V_NAGLE(s->nagle != 0);

	return htobe64(opt0);
}

uint64_t
select_ntuple(struct vi_info *vi, struct l2t_entry *e)
{
	struct adapter *sc = vi->pi->adapter;
	struct tp_params *tp = &sc->params.tp;
	uint64_t ntuple = 0;

	/*
	 * Initialize each of the fields that we care about and that are
	 * present in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && EVL_VLANOFTAG(e->vlan) != CPL_L2T_VLAN_NONE)
		ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;

	if (tp->port_shift >= 0)
		ntuple |= (uint64_t)e->lport << tp->port_shift;

	if (tp->protocol_shift >= 0)
		ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;

	if (tp->vnic_shift >= 0 && tp->ingress_config & F_VNIC) {
		ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vi->vin) |
		    V_FT_VNID_ID_PF(sc->pf) | V_FT_VNID_ID_VLD(vi->vfvld)) <<
		    tp->vnic_shift;
	}

	if (is_t4(sc))
		return (htobe32((uint32_t)ntuple));
	else
		return (htobe64(V_FILTER_TUPLE(ntuple)));
}

static int
is_tls_sock(struct socket *so, struct adapter *sc)
{
	struct inpcb *inp = sotoinpcb(so);
	int i, rc;

	/* XXX: Eventually add a SO_WANT_TLS socket option perhaps? */
	rc = 0;
	ADAPTER_LOCK(sc);
	for (i = 0; i < sc->tt.num_tls_rx_ports; i++) {
		if (inp->inp_lport == htons(sc->tt.tls_rx_ports[i]) ||
		    inp->inp_fport == htons(sc->tt.tls_rx_ports[i])) {
			rc = 1;
			break;
		}
	}
	ADAPTER_UNLOCK(sc);
	return (rc);
}

int
select_ulp_mode(struct socket *so, struct adapter *sc,
    struct offload_settings *s)
{

	if (can_tls_offload(sc) &&
	    (s->tls > 0 || (s->tls < 0 && is_tls_sock(so, sc))))
		return (ULP_MODE_TLS);
	else if (s->ddp > 0 ||
	    (s->ddp < 0 && sc->tt.ddp && (so->so_options & SO_NO_DDP) == 0))
		return (ULP_MODE_TCPDDP);
	else
		return (ULP_MODE_NONE);
}

void
set_ulp_mode(struct toepcb *toep, int ulp_mode)
{

	CTR4(KTR_CXGBE, "%s: toep %p (tid %d) ulp_mode %d",
	    __func__, toep, toep->tid, ulp_mode);
	toep->ulp_mode = ulp_mode;
	tls_init_toep(toep);
	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		ddp_init_toep(toep);
}

int
negative_advice(int status)
{

	return (status == CPL_ERR_RTX_NEG_ADVICE ||
	    status == CPL_ERR_PERSIST_NEG_ADVICE ||
	    status == CPL_ERR_KEEPALV_NEG_ADVICE);
}

static int
alloc_tid_tab(struct tid_info *t, int flags)
{

	MPASS(t->ntids > 0);
	MPASS(t->tid_tab == NULL);

	t->tid_tab = malloc(t->ntids * sizeof(*t->tid_tab), M_CXGBE,
	    M_ZERO | flags);
	if (t->tid_tab == NULL)
		return (ENOMEM);
	atomic_store_rel_int(&t->tids_in_use, 0);

	return (0);
}

static void
free_tid_tab(struct tid_info *t)
{

	KASSERT(t->tids_in_use == 0,
	    ("%s: %d tids still in use.", __func__, t->tids_in_use));

	free(t->tid_tab, M_CXGBE);
	t->tid_tab = NULL;
}

static int
alloc_stid_tab(struct tid_info *t, int flags)
{

	MPASS(t->nstids > 0);
	MPASS(t->stid_tab == NULL);

	t->stid_tab = malloc(t->nstids * sizeof(*t->stid_tab), M_CXGBE,
	    M_ZERO | flags);
	if (t->stid_tab == NULL)
		return (ENOMEM);
	mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF);
	t->stids_in_use = 0;
	TAILQ_INIT(&t->stids);
	t->nstids_free_head = t->nstids;

	return (0);
}

static void
free_stid_tab(struct tid_info *t)
{

	KASSERT(t->stids_in_use == 0,
	    ("%s: %d tids still in use.", __func__, t->stids_in_use));

	if (mtx_initialized(&t->stid_lock))
		mtx_destroy(&t->stid_lock);
	free(t->stid_tab, M_CXGBE);
	t->stid_tab = NULL;
}

static void
free_tid_tabs(struct tid_info *t)
{

	free_tid_tab(t);
	free_atid_tab(t);
	free_stid_tab(t);
}

static int
alloc_tid_tabs(struct tid_info *t)
{
	int rc;

	rc = alloc_tid_tab(t, M_NOWAIT);
	if (rc != 0)
		goto failed;

	rc = alloc_atid_tab(t, M_NOWAIT);
	if (rc != 0)
		goto failed;

	rc = alloc_stid_tab(t, M_NOWAIT);
	if (rc != 0)
		goto failed;

	return (0);
failed:
	free_tid_tabs(t);
	return (rc);
}

static inline void
alloc_tcb_history(struct adapter *sc, struct tom_data *td)
{

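	/*
	 * The TCB history is kept only for adapters configured with a small
	 * number of tids.  Allocation failure is tolerated here because
	 * every consumer checks td->tcb_history for NULL before using it.
	 */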
	if (sc->tids.ntids == 0 || sc->tids.ntids > 1024)
		return;
	rw_init(&td->tcb_history_lock, "TCB history");
	td->tcb_history = malloc(sc->tids.ntids * sizeof(*td->tcb_history),
	    M_CXGBE, M_ZERO | M_NOWAIT);
	td->dupack_threshold = G_DUPACKTHRESH(t4_read_reg(sc, A_TP_PARA_REG0));
}

static inline void
free_tcb_history(struct adapter *sc, struct tom_data *td)
{
#ifdef INVARIANTS
	int i;

	if (td->tcb_history != NULL) {
		for (i = 0; i < sc->tids.ntids; i++) {
			MPASS(td->tcb_history[i] == NULL);
		}
	}
#endif
	free(td->tcb_history, M_CXGBE);
	if (rw_initialized(&td->tcb_history_lock))
		rw_destroy(&td->tcb_history_lock);
}

static void
free_tom_data(struct adapter *sc, struct tom_data *td)
{

	ASSERT_SYNCHRONIZED_OP(sc);

	KASSERT(TAILQ_EMPTY(&td->toep_list),
	    ("%s: TOE PCB list is not empty.", __func__));
	KASSERT(td->lctx_count == 0,
	    ("%s: lctx hash table is not empty.", __func__));

	t4_free_ppod_region(&td->pr);

	if (td->listen_mask != 0)
		hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);

	if (mtx_initialized(&td->unsent_wr_lock))
		mtx_destroy(&td->unsent_wr_lock);
	if (mtx_initialized(&td->lctx_hash_lock))
		mtx_destroy(&td->lctx_hash_lock);
	if (mtx_initialized(&td->toep_list_lock))
		mtx_destroy(&td->toep_list_lock);

	free_tcb_history(sc, td);
	free_tid_tabs(&sc->tids);
	free(td, M_CXGBE);
}

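/*
 * Builds a fake Ethernet + IP/IPv6 + TCP header for the connection (or
 * listener) so that the offload policy's BPF program has a packet-shaped
 * buffer to run against; see lookup_offload_policy() below.
 */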
static char *
prepare_pkt(int open_type, uint16_t vtag, struct inpcb *inp, int *pktlen,
    int *buflen)
{
	char *pkt;
	struct tcphdr *th;
	int ipv6, len;
	const int maxlen =
	    max(sizeof(struct ether_header), sizeof(struct ether_vlan_header)) +
	    max(sizeof(struct ip), sizeof(struct ip6_hdr)) +
	    sizeof(struct tcphdr);

	MPASS(open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN);

	pkt = malloc(maxlen, M_CXGBE, M_ZERO | M_NOWAIT);
	if (pkt == NULL)
		return (NULL);

	ipv6 = inp->inp_vflag & INP_IPV6;
	len = 0;

	if (EVL_VLANOFTAG(vtag) == 0xfff) {
		struct ether_header *eh = (void *)pkt;

		if (ipv6)
			eh->ether_type = htons(ETHERTYPE_IPV6);
		else
			eh->ether_type = htons(ETHERTYPE_IP);

		len += sizeof(*eh);
	} else {
		struct ether_vlan_header *evh = (void *)pkt;

		evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evh->evl_tag = htons(vtag);
		if (ipv6)
			evh->evl_proto = htons(ETHERTYPE_IPV6);
		else
			evh->evl_proto = htons(ETHERTYPE_IP);

		len += sizeof(*evh);
	}

	if (ipv6) {
		struct ip6_hdr *ip6 = (void *)&pkt[len];

		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_plen = htons(sizeof(struct tcphdr));
		ip6->ip6_nxt = IPPROTO_TCP;
		if (open_type == OPEN_TYPE_ACTIVE) {
			ip6->ip6_src = inp->in6p_laddr;
			ip6->ip6_dst = inp->in6p_faddr;
		} else if (open_type == OPEN_TYPE_LISTEN) {
			ip6->ip6_src = inp->in6p_laddr;
			ip6->ip6_dst = ip6->ip6_src;
		}

		len += sizeof(*ip6);
	} else {
		struct ip *ip = (void *)&pkt[len];

		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(*ip) >> 2;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr));
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_p = IPPROTO_TCP;
		if (open_type == OPEN_TYPE_ACTIVE) {
			ip->ip_src = inp->inp_laddr;
			ip->ip_dst = inp->inp_faddr;
		} else if (open_type == OPEN_TYPE_LISTEN) {
			ip->ip_src = inp->inp_laddr;
			ip->ip_dst = ip->ip_src;
		}

		len += sizeof(*ip);
	}

	th = (void *)&pkt[len];
	if (open_type == OPEN_TYPE_ACTIVE) {
		th->th_sport = inp->inp_lport;	/* network byte order already */
		th->th_dport = inp->inp_fport;	/* ditto */
	} else if (open_type == OPEN_TYPE_LISTEN) {
		th->th_sport = inp->inp_lport;	/* network byte order already */
		th->th_dport = th->th_sport;
	}
	len += sizeof(*th);

	*pktlen = *buflen = len;
	return (pkt);
}

const struct offload_settings *
lookup_offload_policy(struct adapter *sc, int open_type, struct mbuf *m,
    uint16_t vtag, struct inpcb *inp)
{
	const struct t4_offload_policy *op;
	char *pkt;
	struct offload_rule *r;
	int i, matched, pktlen, buflen;
	static const struct offload_settings allow_offloading_settings = {
		.offload = 1,
		.rx_coalesce = -1,
		.cong_algo = -1,
		.sched_class = -1,
		.tstamp = -1,
		.sack = -1,
		.nagle = -1,
		.ecn = -1,
		.ddp = -1,
		.tls = -1,
		.txq = -1,
		.rxq = -1,
		.mss = -1,
	};
	static const struct offload_settings disallow_offloading_settings = {
		.offload = 0,
		/* rest is irrelevant when offload is off. */
	};

	rw_assert(&sc->policy_lock, RA_LOCKED);

	/*
	 * If there's no Connection Offloading Policy attached to the device
	 * then we need to return a default static policy.  If
	 * "cop_managed_offloading" is true, then we need to disallow
	 * offloading until a COP is attached to the device.  Otherwise we
	 * allow offloading ...
	 */
	op = sc->policy;
	if (op == NULL) {
		if (sc->tt.cop_managed_offloading)
			return (&disallow_offloading_settings);
		else
			return (&allow_offloading_settings);
	}

	switch (open_type) {
	case OPEN_TYPE_ACTIVE:
	case OPEN_TYPE_LISTEN:
		pkt = prepare_pkt(open_type, vtag, inp, &pktlen, &buflen);
		break;
	case OPEN_TYPE_PASSIVE:
		MPASS(m != NULL);
		pkt = mtod(m, char *);
		MPASS(*pkt == CPL_PASS_ACCEPT_REQ);
		pkt += sizeof(struct cpl_pass_accept_req);
		pktlen = m->m_pkthdr.len - sizeof(struct cpl_pass_accept_req);
		buflen = m->m_len - sizeof(struct cpl_pass_accept_req);
		break;
	default:
		MPASS(0);
		return (&disallow_offloading_settings);
	}

	if (pkt == NULL || pktlen == 0 || buflen == 0)
		return (&disallow_offloading_settings);

	matched = 0;
	r = &op->rule[0];
	for (i = 0; i < op->nrules; i++, r++) {
		if (r->open_type != open_type &&
		    r->open_type != OPEN_TYPE_DONTCARE) {
			continue;
		}
		matched = bpf_filter(r->bpf_prog.bf_insns, pkt, pktlen, buflen);
		if (matched)
			break;
	}

	if (open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN)
		free(pkt, M_CXGBE);

	return (matched ? &r->settings : &disallow_offloading_settings);
}
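
/*
 * Note: rules are evaluated in order and the first match wins; if no rule
 * matches, offload is disallowed.  A COP is typically installed from
 * userspace (e.g. with cxgbetool's "policy" command), but how the policy
 * gets attached is outside the scope of this function.
 */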

static void
reclaim_wr_resources(void *arg, int count)
{
	struct tom_data *td = arg;
	STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list);
	struct cpl_act_open_req *cpl;
	u_int opcode, atid, tid;
	struct wrqe *wr;
	struct adapter *sc = td_adapter(td);

	mtx_lock(&td->unsent_wr_lock);
	STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
	mtx_unlock(&td->unsent_wr_lock);

	while ((wr = STAILQ_FIRST(&twr_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&twr_list, link);

		cpl = wrtod(wr);
		opcode = GET_OPCODE(cpl);

		switch (opcode) {
		case CPL_ACT_OPEN_REQ:
		case CPL_ACT_OPEN_REQ6:
			atid = G_TID_TID(be32toh(OPCODE_TID(cpl)));
			CTR2(KTR_CXGBE, "%s: atid %u ", __func__, atid);
			act_open_failure_cleanup(sc, atid, EHOSTUNREACH);
			free(wr, M_CXGBE);
			break;
		case CPL_PASS_ACCEPT_RPL:
			tid = GET_TID(cpl);
			CTR2(KTR_CXGBE, "%s: tid %u ", __func__, tid);
			synack_failure_cleanup(sc, tid);
			free(wr, M_CXGBE);
			break;
		default:
			log(LOG_ERR, "%s: leaked work request %p, wr_len %d, "
			    "opcode %x\n", __func__, wr, wr->wr_len, opcode);
			/* WR not freed here; go look at it with a debugger.  */
		}
	}
}

/*
 * Ground control to Major TOM
 * Commencing countdown, engines on
 */
static int
t4_tom_activate(struct adapter *sc)
{
	struct tom_data *td;
	struct toedev *tod;
	struct vi_info *vi;
	int i, rc, v;

	ASSERT_SYNCHRONIZED_OP(sc);

	/* per-adapter softc for TOM */
	td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT);
	if (td == NULL)
		return (ENOMEM);

	/* List of TOE PCBs and associated lock */
	mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
	TAILQ_INIT(&td->toep_list);

	/* Listen context */
	mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
	td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
	    &td->listen_mask, HASH_NOWAIT);

	/* List of WRs for which L2 resolution failed */
	mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF);
	STAILQ_INIT(&td->unsent_wr_list);
	TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td);

	/* TID tables */
	rc = alloc_tid_tabs(&sc->tids);
	if (rc != 0)
		goto done;

	rc = t4_init_ppod_region(&td->pr, &sc->vres.ddp,
	    t4_read_reg(sc, A_ULP_RX_TDDP_PSZ), "TDDP page pods");
	if (rc != 0)
		goto done;
	t4_set_reg_field(sc, A_ULP_RX_TDDP_TAGMASK,
	    V_TDDPTAGMASK(M_TDDPTAGMASK), td->pr.pr_tag_mask);

	alloc_tcb_history(sc, td);

	/* toedev ops */
	tod = &td->tod;
	init_toedev(tod);
	tod->tod_softc = sc;
	tod->tod_connect = t4_connect;
	tod->tod_listen_start = t4_listen_start;
	tod->tod_listen_stop = t4_listen_stop;
	tod->tod_rcvd = t4_rcvd;
	tod->tod_output = t4_tod_output;
	tod->tod_send_rst = t4_send_rst;
	tod->tod_send_fin = t4_send_fin;
	tod->tod_pcb_detach = t4_pcb_detach;
	tod->tod_l2_update = t4_l2_update;
	tod->tod_syncache_added = t4_syncache_added;
	tod->tod_syncache_removed = t4_syncache_removed;
	tod->tod_syncache_respond = t4_syncache_respond;
	tod->tod_offload_socket = t4_offload_socket;
	tod->tod_ctloutput = t4_ctloutput;
	tod->tod_tcp_info = t4_tcp_info;

	for_each_port(sc, i) {
		for_each_vi(sc->port[i], v, vi) {
			TOEDEV(vi->ifp) = &td->tod;
		}
	}

	sc->tom_softc = td;
	register_toedev(sc->tom_softc);

done:
	if (rc != 0)
		free_tom_data(sc, td);
	return (rc);
}

static int
t4_tom_deactivate(struct adapter *sc)
{
	int rc = 0;
	struct tom_data *td = sc->tom_softc;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (td == NULL)
		return (0);	/* XXX. KASSERT? */

	if (sc->offload_map != 0)
		return (EBUSY);	/* at least one port has IFCAP_TOE enabled */

	if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI))
		return (EBUSY);	/* both iWARP and iSCSI rely on the TOE. */

	mtx_lock(&td->toep_list_lock);
	if (!TAILQ_EMPTY(&td->toep_list))
		rc = EBUSY;
	mtx_unlock(&td->toep_list_lock);

	mtx_lock(&td->lctx_hash_lock);
	if (td->lctx_count > 0)
		rc = EBUSY;
	mtx_unlock(&td->lctx_hash_lock);

	taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
	mtx_lock(&td->unsent_wr_lock);
	if (!STAILQ_EMPTY(&td->unsent_wr_list))
		rc = EBUSY;
	mtx_unlock(&td->unsent_wr_lock);

	if (rc == 0) {
		unregister_toedev(sc->tom_softc);
		free_tom_data(sc, td);
		sc->tom_softc = NULL;
	}

	return (rc);
}

static int
t4_aio_queue_tom(struct socket *so, struct kaiocb *job)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	int error;

	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		error = t4_aio_queue_ddp(so, job);
		if (error != EOPNOTSUPP)
			return (error);
	}

	return (t4_aio_queue_aiotx(so, job));
}

static int
t4_ctloutput_tom(struct socket *so, struct sockopt *sopt)
{

	if (sopt->sopt_level != IPPROTO_TCP)
		return (tcp_ctloutput(so, sopt));

	switch (sopt->sopt_name) {
	case TCP_TLSOM_SET_TLS_CONTEXT:
	case TCP_TLSOM_GET_TLS_TOM:
	case TCP_TLSOM_CLR_TLS_TOM:
	case TCP_TLSOM_CLR_QUIES:
		return (t4_ctloutput_tls(so, sopt));
	default:
		return (tcp_ctloutput(so, sopt));
	}
}

static int
t4_tom_mod_load(void)
{
	struct protosw *tcp_protosw, *tcp6_protosw;

	/* CPL handlers */
	t4_register_cpl_handler(CPL_GET_TCB_RPL, do_get_tcb_rpl);
	t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl2,
	    CPL_COOKIE_TOM);
	t4_init_connect_cpl_handlers();
	t4_init_listen_cpl_handlers();
	t4_init_cpl_io_handlers();

	t4_ddp_mod_load();
	t4_tls_mod_load();

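	/*
	 * Clone the stock TCP protosw and usrreqs for IPv4 and IPv6, then
	 * override the AIO queue and ctloutput entry points so that sockets
	 * switched to toe_protosw/toe6_protosw by offload_socket() are
	 * routed through the TOM-aware handlers above.
	 */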
	tcp_protosw = pffindproto(PF_INET, IPPROTO_TCP, SOCK_STREAM);
	if (tcp_protosw == NULL)
		return (ENOPROTOOPT);
	bcopy(tcp_protosw, &toe_protosw, sizeof(toe_protosw));
	bcopy(tcp_protosw->pr_usrreqs, &toe_usrreqs, sizeof(toe_usrreqs));
	toe_usrreqs.pru_aio_queue = t4_aio_queue_tom;
	toe_protosw.pr_ctloutput = t4_ctloutput_tom;
	toe_protosw.pr_usrreqs = &toe_usrreqs;

	tcp6_protosw = pffindproto(PF_INET6, IPPROTO_TCP, SOCK_STREAM);
	if (tcp6_protosw == NULL)
		return (ENOPROTOOPT);
	bcopy(tcp6_protosw, &toe6_protosw, sizeof(toe6_protosw));
	bcopy(tcp6_protosw->pr_usrreqs, &toe6_usrreqs, sizeof(toe6_usrreqs));
	toe6_usrreqs.pru_aio_queue = t4_aio_queue_tom;
	toe6_protosw.pr_ctloutput = t4_ctloutput_tom;
	toe6_protosw.pr_usrreqs = &toe6_usrreqs;

	return (t4_register_uld(&tom_uld_info));
}

static void
tom_uninit(struct adapter *sc, void *arg __unused)
{
	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun"))
		return;

	/* Try to free resources (works only if no port has IFCAP_TOE) */
	if (uld_active(sc, ULD_TOM))
		t4_deactivate_uld(sc, ULD_TOM);

	end_synchronized_op(sc, 0);
}

static int
t4_tom_mod_unload(void)
{
	t4_iterate(tom_uninit, NULL);

	if (t4_unregister_uld(&tom_uld_info) == EBUSY)
		return (EBUSY);

	t4_tls_mod_unload();
	t4_ddp_mod_unload();

	t4_uninit_connect_cpl_handlers();
	t4_uninit_listen_cpl_handlers();
	t4_uninit_cpl_io_handlers();
	t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, NULL, CPL_COOKIE_TOM);

	return (0);
}
#endif	/* TCP_OFFLOAD */

static int
t4_tom_modevent(module_t mod, int cmd, void *arg)
{
	int rc = 0;

#ifdef TCP_OFFLOAD
	switch (cmd) {
	case MOD_LOAD:
		rc = t4_tom_mod_load();
		break;

	case MOD_UNLOAD:
		rc = t4_tom_mod_unload();
		break;

	default:
		rc = EINVAL;
	}
#else
	printf("t4_tom: compiled without TCP_OFFLOAD support.\n");
	rc = EOPNOTSUPP;
#endif
	return (rc);
}

static moduledata_t t4_tom_moddata = {
	"t4_tom",
	t4_tom_modevent,
	0
};

MODULE_VERSION(t4_tom, 1);
MODULE_DEPEND(t4_tom, toecore, 1, 1, 1);
MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1);
DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY);