xref: /freebsd/sys/dev/cxgbe/tom/t4_connect.c (revision f8b865d1d62d17626ab993212963277c06cc25b8)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>
#include <netinet/cc/cc.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

/* atid services */
static int alloc_atid(struct adapter *, void *);
static void *lookup_atid(struct adapter *, int);
static void free_atid(struct adapter *, int);

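/*
 * Allocate an atid (active-open tid) from the adapter's free list and store
 * the given context in it.  Returns the atid, or -1 if none are available.
 */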
static int
alloc_atid(struct adapter *sc, void *ctx)
{
	struct tid_info *t = &sc->tids;
	int atid = -1;

	mtx_lock(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = p - t->atid_tab;
		t->afree = p->next;
		p->data = ctx;
		t->atids_in_use++;
	}
	mtx_unlock(&t->atid_lock);
	return (atid);
}

static void *
lookup_atid(struct adapter *sc, int atid)
{
	struct tid_info *t = &sc->tids;

	return (t->atid_tab[atid].data);
}

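/*
 * Return an atid to the free list.  The atid (and the context stored in it)
 * must not be used after this call.
 */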
static void
free_atid(struct adapter *sc, int atid)
{
	struct tid_info *t = &sc->tids;
	union aopen_entry *p = &t->atid_tab[atid];

	mtx_lock(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	mtx_unlock(&t->atid_lock);
}

/*
 * Active open succeeded.
 */
static int
do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_establish *cpl = (const void *)(rss + 1);
	u_int tid = GET_TID(cpl);
	u_int atid = G_TID_TID(ntohl(cpl->tos_atid));
	struct toepcb *toep = lookup_atid(sc, atid);
	struct inpcb *inp = toep->inp;

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == atid, ("%s: toep tid/atid mismatch", __func__));

	CTR3(KTR_CXGBE, "%s: atid %u, tid %u", __func__, atid, tid);
	free_atid(sc, atid);

	CURVNET_SET(toep->vnet);
	INP_WLOCK(inp);
	toep->tid = tid;
	insert_tid(sc, tid, toep, inp->inp_vflag & INP_IPV6 ? 2 : 1);
	if (inp->inp_flags & INP_DROPPED) {

		/* socket closed by the kernel before hw told us it connected */

		send_flowc_wr(toep, NULL);
		send_reset(sc, toep, be32toh(cpl->snd_isn));
		goto done;
	}

	make_established(toep, cpl->snd_isn, cpl->rcv_isn, cpl->tcp_opt);

	if (toep->ulp_mode == ULP_MODE_TLS)
		tls_establish(toep);

done:
	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return (0);
}

/*
 * Convert an ACT_OPEN_RPL status to an errno.
 */
static inline int
act_open_rpl_status_to_errno(int status)
{

	switch (status) {
	case CPL_ERR_CONN_RESET:
		return (ECONNREFUSED);
	case CPL_ERR_ARP_MISS:
		return (EHOSTUNREACH);
	case CPL_ERR_CONN_TIMEDOUT:
		return (ETIMEDOUT);
	case CPL_ERR_TCAM_FULL:
		return (EAGAIN);
	case CPL_ERR_CONN_EXIST:
		log(LOG_ERR, "ACTIVE_OPEN_RPL: 4-tuple in use\n");
		return (EAGAIN);
	default:
		return (EIO);
	}
}

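/*
 * Common cleanup for a failed active open: release the atid, report the
 * failure to the TCP stack via toe_connect_failed(), and release the toepcb.
 * The tcbinfo lock is taken only for errors other than EAGAIN.
 */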
void
act_open_failure_cleanup(struct adapter *sc, u_int atid, u_int status)
{
	struct toepcb *toep = lookup_atid(sc, atid);
	struct inpcb *inp = toep->inp;
	struct toedev *tod = &toep->td->tod;

	free_atid(sc, atid);
	toep->tid = -1;

	CURVNET_SET(toep->vnet);
	if (status != EAGAIN)
		INP_INFO_RLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	toe_connect_failed(tod, inp, status);
	final_cpl_received(toep);	/* unlocks inp */
	if (status != EAGAIN)
		INP_INFO_RUNLOCK(&V_tcbinfo);
	CURVNET_RESTORE();
}

/*
 * Active open failed.
 */
static int
do_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
	struct toepcb *toep = lookup_atid(sc, atid);
	int rc;

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == atid, ("%s: toep tid/atid mismatch", __func__));

	CTR3(KTR_CXGBE, "%s: atid %u, status %u ", __func__, atid, status);

	/* Ignore negative advice */
	if (negative_advice(status))
		return (0);

	if (status && act_open_has_tid(status))
		release_tid(sc, GET_TID(cpl), toep->ctrlq);

	rc = act_open_rpl_status_to_errno(status);
	act_open_failure_cleanup(sc, atid, rc);

	return (0);
}

/*
 * Options2 for active open.
 */
static uint32_t
calc_opt2a(struct socket *so, struct toepcb *toep,
    const struct offload_settings *s)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct port_info *pi = toep->vi->pi;
	struct adapter *sc = pi->adapter;
	uint32_t opt2 = 0;

	/*
	 * rx flow control, rx coalesce, congestion control, and tx pace are all
	 * explicitly set by the driver.  On T5+ the ISS is also set by the
	 * driver to the value picked by the kernel.
	 */
	if (is_t4(sc)) {
		opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID;
		opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID;
	} else {
		opt2 |= F_T5_OPT_2_VALID;	/* all 4 valid */
		opt2 |= F_T5_ISS;		/* ISS provided in CPL */
	}

	if (s->sack > 0 || (s->sack < 0 && (tp->t_flags & TF_SACK_PERMIT)))
		opt2 |= F_SACK_EN;

	if (s->tstamp > 0 || (s->tstamp < 0 && (tp->t_flags & TF_REQ_TSTMP)))
		opt2 |= F_TSTAMPS_EN;

	if (tp->t_flags & TF_REQ_SCALE)
		opt2 |= F_WND_SCALE_EN;

	if (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn == 1))
		opt2 |= F_CCTRL_ECN;

	/* XXX: F_RX_CHANNEL for multiple rx c-chan support goes here. */

	opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]);

	/* These defaults are subject to ULP specific fixups later. */
	opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0);

	opt2 |= V_PACE(0);

	if (s->cong_algo >= 0)
		opt2 |= V_CONG_CNTRL(s->cong_algo);
	else if (sc->tt.cong_algorithm >= 0)
		opt2 |= V_CONG_CNTRL(sc->tt.cong_algorithm & M_CONG_CNTRL);
	else {
		struct cc_algo *cc = CC_ALGO(tp);

		if (strcasecmp(cc->name, "reno") == 0)
			opt2 |= V_CONG_CNTRL(CONG_ALG_RENO);
		else if (strcasecmp(cc->name, "tahoe") == 0)
			opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
		else if (strcasecmp(cc->name, "newreno") == 0)
			opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO);
		else if (strcasecmp(cc->name, "highspeed") == 0)
			opt2 |= V_CONG_CNTRL(CONG_ALG_HIGHSPEED);
		else {
			/*
			 * Use newreno in case the algorithm selected by the
			 * host stack is not supported by the hardware.
			 */
			opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO);
		}
	}

	if (s->rx_coalesce > 0 || (s->rx_coalesce < 0 && sc->tt.rx_coalesce))
		opt2 |= V_RX_COALESCE(M_RX_COALESCE);

	/* Note that ofld_rxq is already set according to s->rxq. */
	opt2 |= F_RSS_QUEUE_VALID;
	opt2 |= V_RSS_QUEUE(toep->ofld_rxq->iq.abs_id);

#ifdef USE_DDP_RX_FLOW_CONTROL
	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		opt2 |= F_RX_FC_DDP;
#endif

	if (toep->ulp_mode == ULP_MODE_TLS) {
		opt2 &= ~V_RX_COALESCE(M_RX_COALESCE);
		opt2 |= F_RX_FC_DISABLE;
	}

	return (htobe32(opt2));
}

void
t4_init_connect_cpl_handlers(void)
{

	t4_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
}

void
t4_uninit_connect_cpl_handlers(void)
{

	t4_register_cpl_handler(CPL_ACT_ESTABLISH, NULL);
	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, NULL);
}

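/*
 * Record the current line number as the reason this connection is not being
 * offloaded, set the return code, and bail out to the failed: label in
 * t4_connect().
 */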
#define DONT_OFFLOAD_ACTIVE_OPEN(x)	do { \
	reason = __LINE__; \
	rc = (x); \
	goto failed; \
} while (0)

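/*
 * Size of the ACT_OPEN_REQ CPL for this chip generation and address family.
 * Chips newer than T6 use the T6 sizes.
 */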
static inline int
act_open_cpl_size(struct adapter *sc, int isipv6)
{
	int idx;
	static const int sz_table[3][2] = {
		{
			sizeof (struct cpl_act_open_req),
			sizeof (struct cpl_act_open_req6)
		},
		{
			sizeof (struct cpl_t5_act_open_req),
			sizeof (struct cpl_t5_act_open_req6)
		},
		{
			sizeof (struct cpl_t6_act_open_req),
			sizeof (struct cpl_t6_act_open_req6)
		},
	};

	MPASS(chip_id(sc) >= CHELSIO_T4);
	idx = min(chip_id(sc) - CHELSIO_T4, 2);

	return (sz_table[idx][!!isipv6]);
}

/*
 * active open (soconnect).
 *
 * State of affairs on entry:
 * soisconnecting (so_state |= SS_ISCONNECTING)
 * tcbinfo not locked (This has changed - used to be WLOCKed)
 * inp WLOCKed
 * tp->t_state = TCPS_SYN_SENT
 * rtalloc1, RT_UNLOCK on rt.
 */
int
t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
    struct sockaddr *nam)
{
	struct adapter *sc = tod->tod_softc;
	struct tom_data *td = tod_td(tod);
	struct toepcb *toep = NULL;
	struct wrqe *wr = NULL;
	struct ifnet *rt_ifp = rt->rt_ifp;
	struct vi_info *vi;
	int mtu_idx, rscale, qid_atid, rc, isipv6, txqid, rxqid;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	int reason;
	struct offload_settings settings;
	uint16_t vid = 0xffff;

	INP_WLOCK_ASSERT(inp);
	KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
	    ("%s: dest addr %p has family %u", __func__, nam, nam->sa_family));

	if (rt_ifp->if_type == IFT_ETHER)
		vi = rt_ifp->if_softc;
	else if (rt_ifp->if_type == IFT_L2VLAN) {
		struct ifnet *ifp = VLAN_COOKIE(rt_ifp);

		vi = ifp->if_softc;
		VLAN_TAG(ifp, &vid);
	} else if (rt_ifp->if_type == IFT_IEEE8023ADLAG)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOSYS); /* XXX: implement lagg+TOE */
	else
		DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

	rw_rlock(&sc->policy_lock);
	settings = *lookup_offload_policy(sc, OPEN_TYPE_ACTIVE, NULL, vid, inp);
	rw_runlock(&sc->policy_lock);
	if (!settings.offload)
		DONT_OFFLOAD_ACTIVE_OPEN(EPERM);

	if (settings.txq >= 0 && settings.txq < vi->nofldtxq)
		txqid = settings.txq;
	else
		txqid = arc4random() % vi->nofldtxq;
	txqid += vi->first_ofld_txq;
	if (settings.rxq >= 0 && settings.rxq < vi->nofldrxq)
		rxqid = settings.rxq;
	else
		rxqid = arc4random() % vi->nofldrxq;
	rxqid += vi->first_ofld_rxq;

	toep = alloc_toepcb(vi, txqid, rxqid, M_NOWAIT | M_ZERO);
	if (toep == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->tid = alloc_atid(sc, toep);
	if (toep->tid < 0)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->l2te = t4_l2t_get(vi->pi, rt_ifp,
	    rt->rt_flags & RTF_GATEWAY ? rt->rt_gateway : nam);
	if (toep->l2te == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	isipv6 = nam->sa_family == AF_INET6;
	wr = alloc_wrqe(act_open_cpl_size(sc, isipv6), toep->ctrlq);
	if (wr == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->vnet = so->so_vnet;
	set_ulp_mode(toep, select_ulp_mode(so, sc, &settings));
	SOCKBUF_LOCK(&so->so_rcv);
	/* opt0 rcv_bufsiz initially, assumes its normal meaning later */
	toep->rx_credits = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * The kernel sets request_r_scale based on sb_max whereas we need to
	 * take hardware's MAX_RCV_WND into account too.  This is normally a
	 * no-op as MAX_RCV_WND is much larger than the default sb_max.
	 */
	if (tp->t_flags & TF_REQ_SCALE)
		rscale = tp->request_r_scale = select_rcv_wscale();
	else
		rscale = 0;
	mtu_idx = find_best_mtu_idx(sc, &inp->inp_inc, &settings);
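	/*
	 * qid_atid packs the atid into the low bits and the absolute id of
	 * the offload rx queue (where the hardware's reply CPL is expected)
	 * into the bits above it.
	 */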
	qid_atid = (toep->ofld_rxq->iq.abs_id << 14) | toep->tid;

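	/* Build the ACT_OPEN_REQ6 or ACT_OPEN_REQ appropriate for the chip. */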
	if (isipv6) {
		struct cpl_act_open_req6 *cpl = wrtod(wr);
		struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
		struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

		if ((inp->inp_vflag & INP_IPV6) == 0)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

		toep->ce = hold_lip(td, &inp->in6p_laddr, NULL);
		if (toep->ce == NULL)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOENT);

		switch (chip_id(sc)) {
		case CHELSIO_T4:
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T5:
			INIT_TP_WR(cpl5, 0);
			cpl5->iss = htobe32(tp->iss);
			cpl5->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T6:
		default:
			INIT_TP_WR(cpl6, 0);
			cpl6->iss = htobe32(tp->iss);
			cpl6->params = select_ntuple(vi, toep->l2te);
			break;
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
		    qid_atid));
		cpl->local_port = inp->inp_lport;
		cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
		cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
		cpl->peer_port = inp->inp_fport;
		cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
		cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode, &settings);
		cpl->opt2 = calc_opt2a(so, toep, &settings);
	} else {
		struct cpl_act_open_req *cpl = wrtod(wr);
		struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
		struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

		switch (chip_id(sc)) {
		case CHELSIO_T4:
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T5:
			INIT_TP_WR(cpl5, 0);
			cpl5->iss = htobe32(tp->iss);
			cpl5->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T6:
		default:
			INIT_TP_WR(cpl6, 0);
			cpl6->iss = htobe32(tp->iss);
			cpl6->params = select_ntuple(vi, toep->l2te);
			break;
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
		    qid_atid));
		inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
		    &cpl->peer_ip, &cpl->peer_port);
		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode, &settings);
		cpl->opt2 = calc_opt2a(so, toep, &settings);
	}

	CTR5(KTR_CXGBE, "%s: atid %u (%s), toep %p, inp %p", __func__,
	    toep->tid, tcpstates[tp->t_state], toep, inp);

	offload_socket(so, toep);
	rc = t4_l2t_send(sc, wr, toep->l2te);
	if (rc == 0) {
		toep->flags |= TPF_CPL_PENDING;
		return (0);
	}

	undo_offload_socket(so);
	reason = __LINE__;
failed:
	CTR3(KTR_CXGBE, "%s: not offloading (%d), rc %d", __func__, reason, rc);

	if (wr)
		free_wrqe(wr);

	if (toep) {
		if (toep->tid >= 0)
			free_atid(sc, toep->tid);
		if (toep->l2te)
			t4_l2t_release(toep->l2te);
		if (toep->ce)
			release_lip(td, toep->ce);
		free_toepcb(toep);
	}

	return (rc);
}
#endif