/*-
 * Copyright (c) 2012, 2015 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sglist.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

VNET_DECLARE(int, tcp_do_autosndbuf);
#define V_tcp_do_autosndbuf VNET(tcp_do_autosndbuf)
VNET_DECLARE(int, tcp_autosndbuf_inc);
#define V_tcp_autosndbuf_inc VNET(tcp_autosndbuf_inc)
VNET_DECLARE(int, tcp_autosndbuf_max);
#define V_tcp_autosndbuf_max VNET(tcp_autosndbuf_max)
VNET_DECLARE(int, tcp_do_autorcvbuf);
#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
VNET_DECLARE(int, tcp_autorcvbuf_inc);
#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
VNET_DECLARE(int, tcp_autorcvbuf_max);
#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)

void
send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
{
	struct wrqe *wr;
	struct fw_flowc_wr *flowc;
	unsigned int nparams = ftxp ? 8 : 6, flowclen;
	struct vi_info *vi = toep->vi;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	unsigned int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT),
	    ("%s: flowc for tid %u sent already", __func__, toep->tid));

	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);

	wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	flowc = wrtod(wr);
	memset(flowc, 0, wr->wr_len);

	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(nparams));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
	    V_FW_WR_FLOWID(toep->tid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htobe32(pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htobe32(pi->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htobe32(pi->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htobe32(toep->ofld_rxq->iq.abs_id);
	if (ftxp) {
		uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf);

		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
		flowc->mnemval[4].val = htobe32(ftxp->snd_nxt);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
		flowc->mnemval[5].val = htobe32(ftxp->rcv_nxt);
		flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[6].val = htobe32(sndbuf);
		flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[7].val = htobe32(ftxp->mss);

		CTR6(KTR_CXGBE,
		    "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x",
		    __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt,
		    ftxp->rcv_nxt);
	} else {
		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[4].val = htobe32(512);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[5].val = htobe32(512);

		CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);
	}

	txsd->tx_credits = howmany(flowclen, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	toep->flags |= TPF_FLOWC_WR_SENT;
	t4_wrq_tx(sc, wr);
}

void
send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt)
{
	struct wrqe *wr;
	struct cpl_abort_req *req;
	int tid = toep->tid;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);	/* don't use if INP_DROPPED */

	INP_WLOCK_ASSERT(inp);

	CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s",
	    __func__, toep->tid,
	    inp->inp_flags & INP_DROPPED ? "inp dropped" :
	    tcpstates[tp->t_state],
	    toep->flags, inp->inp_flags,
	    toep->flags & TPF_ABORT_SHUTDOWN ?
	    " (abort already in progress)" : "");

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		return;			/* abort already in progress */

	toep->flags |= TPF_ABORT_SHUTDOWN;

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %d.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid);
	if (inp->inp_flags & INP_DROPPED)
		req->rsvd0 = htobe32(snd_nxt);
	else
		req->rsvd0 = htobe32(tp->snd_nxt);
	req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	/*
	 * XXX: What's the correct way to tell that the inp hasn't been detached
	 * from its socket?  Should I even be flushing the snd buffer here?
	 */
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)	/* because I'm not sure.  See comment above */
			sbflush(&so->so_snd);
	}

	t4_l2t_send(sc, wr, toep->l2te);
}

/*
 * Called when a connection is established to translate the TCP options
 * reported by HW to FreeBSD's native format.
 */
static void
assign_rxopt(struct tcpcb *tp, unsigned int opt)
{
	struct toepcb *toep = tp->t_toe;
	struct inpcb *inp = tp->t_inpcb;
	struct adapter *sc = td_adapter(toep->td);
	int n;

	INP_LOCK_ASSERT(inp);

	if (inp->inp_inc.inc_flags & INC_ISIPV6)
		n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		n = sizeof(struct ip) + sizeof(struct tcphdr);
	tp->t_maxseg = sc->params.mtus[G_TCPOPT_MSS(opt)] - n;

	CTR4(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u)", __func__, toep->tid,
	    G_TCPOPT_MSS(opt), sc->params.mtus[G_TCPOPT_MSS(opt)]);

	if (G_TCPOPT_TSTAMP(opt)) {
		tp->t_flags |= TF_RCVD_TSTMP;	/* timestamps ok */
		tp->ts_recent = 0;		/* hmmm */
		tp->ts_recent_age = tcp_ts_getticks();
	}

	if (G_TCPOPT_SACK(opt))
		tp->t_flags |= TF_SACK_PERMIT;	/* should already be set */
	else
		tp->t_flags &= ~TF_SACK_PERMIT;	/* sack disallowed by peer */

	if (G_TCPOPT_WSCALE_OK(opt))
		tp->t_flags |= TF_RCVD_SCALE;

	/* Doing window scaling? */
	if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
	    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
		tp->rcv_scale = tp->request_r_scale;
		tp->snd_scale = G_TCPOPT_SND_WSCALE(opt);
	}
}

/*
 * Completes some final bits of initialization for just established connections
 * and changes their state to TCPS_ESTABLISHED.
 *
 * The ISNs are from after the exchange of SYNs.  i.e., the true ISN + 1.
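 * Both arrive in network byte order and are decremented below to recover the
 * true ISS and IRS.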
 */
void
make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn,
    uint16_t opt)
{
	struct inpcb *inp = toep->inp;
	struct socket *so = inp->inp_socket;
	struct tcpcb *tp = intotcpcb(inp);
	long bufsize;
	uint32_t iss = be32toh(snd_isn) - 1;	/* true ISS */
	uint32_t irs = be32toh(rcv_isn) - 1;	/* true IRS */
	uint16_t tcpopt = be16toh(opt);
	struct flowc_tx_params ftxp;

	INP_WLOCK_ASSERT(inp);
	KASSERT(tp->t_state == TCPS_SYN_SENT ||
	    tp->t_state == TCPS_SYN_RECEIVED,
	    ("%s: TCP state %s", __func__, tcpstates[tp->t_state]));

	CTR4(KTR_CXGBE, "%s: tid %d, toep %p, inp %p",
	    __func__, toep->tid, toep, inp);

	tp->t_state = TCPS_ESTABLISHED;
	tp->t_starttime = ticks;
	TCPSTAT_INC(tcps_connects);

	tp->irs = irs;
	tcp_rcvseqinit(tp);
	tp->rcv_wnd = toep->rx_credits << 10;
	tp->rcv_adv += tp->rcv_wnd;
	tp->last_ack_sent = tp->rcv_nxt;

	/*
	 * If we were unable to send all rx credits via opt0, save the remainder
	 * in rx_credits so that they can be handed over with the next credit
	 * update.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	bufsize = select_rcv_wnd(so);
	SOCKBUF_UNLOCK(&so->so_rcv);
	toep->rx_credits = bufsize - tp->rcv_wnd;

	tp->iss = iss;
	tcp_sendseqinit(tp);
	tp->snd_una = iss + 1;
	tp->snd_nxt = iss + 1;
	tp->snd_max = iss + 1;

	assign_rxopt(tp, tcpopt);

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf)
		bufsize = V_tcp_autosndbuf_max;
	else
		bufsize = sbspace(&so->so_snd);
	SOCKBUF_UNLOCK(&so->so_snd);

	ftxp.snd_nxt = tp->snd_nxt;
	ftxp.rcv_nxt = tp->rcv_nxt;
	ftxp.snd_space = bufsize;
	ftxp.mss = tp->t_maxseg;
	send_flowc_wr(toep, &ftxp);

	soisconnected(so);
}

static int
send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits)
{
	struct wrqe *wr;
	struct cpl_rx_data_ack *req;
	uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	KASSERT(credits >= 0, ("%s: %d credits", __func__, credits));

	wr = alloc_wrqe(sizeof(*req), toep->ctrlq);
	if (wr == NULL)
		return (0);
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid);
	req->credit_dack = htobe32(dack | V_RX_CREDITS(credits));

	t4_wrq_tx(sc, wr);
	return (credits);
}

void
t4_rcvd(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_rcv;
	struct toepcb *toep = tp->t_toe;
	int credits;

	INP_WLOCK_ASSERT(inp);

	SOCKBUF_LOCK(sb);
	KASSERT(toep->sb_cc >= sbused(sb),
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sbused(sb), toep->sb_cc));

	toep->rx_credits += toep->sb_cc - sbused(sb);
	toep->sb_cc = sbused(sb);

	if (toep->rx_credits > 0 &&
	    (tp->rcv_wnd <= 32 * 1024 || toep->rx_credits >= 64 * 1024 ||
	    (toep->rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) ||
	    toep->sb_cc + tp->rcv_wnd < sb->sb_lowat)) {

		credits = send_rx_credits(sc, toep, toep->rx_credits);
		toep->rx_credits -= credits;
		tp->rcv_wnd += credits;
		tp->rcv_adv += credits;
	}
	SOCKBUF_UNLOCK(sb);
}

/*
 * Close a connection by sending a CPL_CLOSE_CON_REQ message.
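 * The request carries our FIN; TPF_FIN_SENT ensures that it is issued at most
 * once per connection.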
 */
static int
close_conn(struct adapter *sc, struct toepcb *toep)
{
	struct wrqe *wr;
	struct cpl_close_con_req *req;
	unsigned int tid = toep->tid;

	CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid,
	    toep->flags & TPF_FIN_SENT ? ", IGNORED" : "");

	if (toep->flags & TPF_FIN_SENT)
		return (0);

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) |
	    V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr)));
	req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) |
	    V_FW_WR_FLOWID(tid));
	req->wr.wr_lo = cpu_to_be64(0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	toep->flags |= TPF_FIN_SENT;
	toep->flags &= ~TPF_SEND_FIN;
	t4_l2t_send(sc, wr, toep->l2te);

	return (0);
}

#define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
#define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16))

/* Maximum amount of immediate data we could stuff in a WR */
static inline int
max_imm_payload(int tx_credits)
{
	const int n = 2;	/* Use only up to 2 desc for imm. data WR */

	KASSERT(tx_credits >= 0 &&
		tx_credits <= MAX_OFLD_TX_CREDITS,
		("%s: %d credits", __func__, tx_credits));

	if (tx_credits < MIN_OFLD_TX_CREDITS)
		return (0);

	if (tx_credits >= (n * EQ_ESIZE) / 16)
		return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr));
	else
		return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr));
}

/* Maximum number of SGL entries we could stuff in a WR */
static inline int
max_dsgl_nsegs(int tx_credits)
{
	int nseg = 1;	/* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */
	int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS;

	KASSERT(tx_credits >= 0 &&
		tx_credits <= MAX_OFLD_TX_CREDITS,
		("%s: %d credits", __func__, tx_credits));

	if (tx_credits < MIN_OFLD_TX_CREDITS)
		return (0);

	nseg += 2 * (sge_pair_credits * 16 / 24);
	if ((sge_pair_credits * 16) % 24 == 16)
		nseg++;

	return (nseg);
}

static inline void
write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen,
    unsigned int plen, uint8_t credits, int shove, int ulp_submode, int txalign)
{
	struct fw_ofld_tx_data_wr *txwr = dst;

	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) |
	    V_FW_WR_IMMDLEN(immdlen));
	txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) |
	    V_FW_WR_LEN16(credits));
	txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(toep->ulp_mode) |
	    V_TX_ULP_SUBMODE(ulp_submode) | V_TX_URG(0) | V_TX_SHOVE(shove));
	txwr->plen = htobe32(plen);

	if (txalign > 0) {
		struct tcpcb *tp = intotcpcb(toep->inp);

		if (plen < 2 * tp->t_maxseg || is_10G_port(toep->vi->pi))
			txwr->lsodisable_to_flags |=
			    htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE);
		else
			txwr->lsodisable_to_flags |=
			    htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD |
				(tp->t_flags & TF_NODELAY ? 0 :
				F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE));
	}
}

/*
 * Generate a DSGL from a starting mbuf.  The total number of segments and the
 * maximum segments in any one mbuf are provided.
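 * The first segment is written into the ulptx_sgl header itself; the rest are
 * packed two at a time into the ulptx_sge_pair entries that follow it.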
 */
static void
write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n)
{
	struct mbuf *m;
	struct ulptx_sgl *usgl = dst;
	int i, j, rc;
	struct sglist sg;
	struct sglist_seg segs[n];

	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

	sglist_init(&sg, n, segs);
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));

	i = -1;
	for (m = start; m != stop; m = m->m_next) {
		rc = sglist_append(&sg, mtod(m, void *), m->m_len);
		if (__predict_false(rc != 0))
			panic("%s: sglist_append %d", __func__, rc);

		for (j = 0; j < sg.sg_nseg; i++, j++) {
			if (i < 0) {
				usgl->len0 = htobe32(segs[j].ss_len);
				usgl->addr0 = htobe64(segs[j].ss_paddr);
			} else {
				usgl->sge[i / 2].len[i & 1] =
				    htobe32(segs[j].ss_len);
				usgl->sge[i / 2].addr[i & 1] =
				    htobe64(segs[j].ss_paddr);
			}
#ifdef INVARIANTS
			nsegs--;
#endif
		}
		sglist_reset(&sg);
	}
	if (i & 1)
		usgl->sge[i / 2].len[1] = htobe32(0);
	KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p",
	    __func__, nsegs, start, stop));
}

/*
 * Max number of SGL entries an offload tx work request can have.  This is 41
 * (1 + 40) for a full 512B work request.
 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40)
 */
#define OFLD_SGL_LEN (41)

/*
 * Send data and/or a FIN to the peer.
 *
 * The socket's so_snd buffer consists of a stream of data starting with sb_mb
 * and linked together with m_next.  sb_sndptr, if set, is the last mbuf that
 * was transmitted.
 *
 * drop indicates the number of bytes that should be dropped from the head of
 * the send buffer.  It is an optimization that lets do_fw4_ack avoid creating
 * contention on the send buffer lock (before this change it used to do
 * sowwakeup and then t4_push_frames right after that when recovering from tx
 * stalls).  When drop is set this function MUST drop the bytes and wake up any
 * writers.
 */
void
t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m, *sb_sndptr;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_snd;
	int tx_credits, shove, compl, space, sowwakeup;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
	    toep->ulp_mode == ULP_MODE_TCPDDP ||
	    toep->ulp_mode == ULP_MODE_RDMA,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	do {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		SOCKBUF_LOCK(sb);
		sowwakeup = drop;
		if (drop) {
			sbdrop_locked(sb, drop);
			drop = 0;
		}
		sb_sndptr = sb->sb_sndptr;
		sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb;
		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/* This mbuf sent us _over_ the nsegs limit, back out */
			if (plen > max_imm && nsegs > max_nsegs) {
				nsegs -= n;
				plen -= m->m_len;
				if (plen == 0) {
					/* Too few credits */
					toep->flags |= TPF_TX_SUSPENDED;
					if (sowwakeup)
						sowwakeup_locked(so);
					else
						SOCKBUF_UNLOCK(sb);
					SOCKBUF_UNLOCK_ASSERT(sb);
					return;
				}
				break;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
			sb_sndptr = m;	/* new sb->sb_sndptr if all goes well */

			/* This mbuf put us right at the max_nsegs limit */
			if (plen > max_imm && nsegs == max_nsegs) {
				m = m->m_next;
				break;
			}
		}

		space = sbspace(sb);

		if (space <= sb->sb_hiwat * 3 / 8 &&
		    toep->plen_nocompl + plen >= sb->sb_hiwat / 4)
			compl = 1;
		else
			compl = 0;

		if (sb->sb_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf &&
		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
		    space < sb->sb_hiwat / 8) {
			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max);

			if (!sbreserve_locked(sb, newsize, so, NULL))
				sb->sb_flags &= ~SB_AUTOSIZE;
			else
				sowwakeup = 1;	/* room available */
		}
		if (sowwakeup)
			sowwakeup_locked(so);
		else
			SOCKBUF_UNLOCK(sb);
		SOCKBUF_UNLOCK_ASSERT(sb);

		/* nothing to send */
		if (plen == 0) {
			KASSERT(m == NULL,
			    ("%s: nothing to send, but m != NULL", __func__));
			break;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		shove = m == NULL && !(tp->t_flags & TF_MORETOCOME);
		if (plen <= max_imm) {

			/* Immediate data tx */

			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
					toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, plen, credits, shove, 0,
			    sc->tt.tx_align);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
			nsegs = 0;
		} else {
			int wr_len;

			/* DSGL tx */

			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, plen, credits, shove, 0,
			    sc->tt.tx_align);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));

		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4)
			compl = 1;

		if (compl || toep->ulp_mode == ULP_MODE_RDMA) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}

		tp->snd_nxt += plen;
		tp->snd_max += plen;

		SOCKBUF_LOCK(sb);
		KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__));
		sb->sb_sndptr = sb_sndptr;
		SOCKBUF_UNLOCK(sb);

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	} while (m != NULL);

	/* Send a FIN if requested, but only if there's no more data to send */
	if (m == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}

static inline void
rqdrop_locked(struct mbufq *q, int plen)
{
	struct mbuf *m;

	while (plen > 0) {
		m = mbufq_dequeue(q);

		/* Too many credits. */
		MPASS(m != NULL);
		M_ASSERTPKTHDR(m);

		/* Partial credits. */
		MPASS(plen >= m->m_pkthdr.len);

		plen -= m->m_pkthdr.len;
		m_freem(m);
	}
}

void
t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	u_int adjusted_plen, ulp_submode;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	int tx_credits, shove;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
	struct mbufq *pduq = &toep->ulp_pduq;
	static const u_int ulp_extra_len[] = {0, 4, 4, 8};

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
	KASSERT(toep->ulp_mode == ULP_MODE_ISCSI,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	if (drop)
		rqdrop_locked(&toep->ulp_pdu_reclaimq, drop);

	while ((sndptr = mbufq_first(pduq)) != NULL) {
		M_ASSERTPKTHDR(sndptr);

		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/*
			 * This mbuf would send us _over_ the nsegs limit.
			 * Suspend tx because the PDU can't be sent out.
			 */
			if (plen > max_imm && nsegs > max_nsegs) {
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		/*
		 * We have a PDU to send.  All of it goes out in one WR so 'm'
		 * is NULL.  A PDU's length is always a multiple of 4.
		 */
		MPASS(m == NULL);
		MPASS((plen & 3) == 0);
		MPASS(sndptr->m_pkthdr.len == plen);

		shove = !(tp->t_flags & TF_MORETOCOME);
		ulp_submode = mbuf_ulp_submode(sndptr);
		MPASS(ulp_submode < nitems(ulp_extra_len));

		/*
		 * plen doesn't include header and data digests, which are
		 * generated and inserted in the right places by the TOE, but
		 * they do occupy TCP sequence space and need to be accounted
		 * for.
		 */
		adjusted_plen = plen + ulp_extra_len[ulp_submode];
		if (plen <= max_imm) {

			/* Immediate data tx */

			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
					toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, adjusted_plen, credits,
			    shove, ulp_submode, sc->tt.tx_align);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
			nsegs = 0;
		} else {
			int wr_len;

			/* DSGL tx */
			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, adjusted_plen, credits,
			    shove, ulp_submode, sc->tt.tx_align);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));

		m = mbufq_dequeue(pduq);
		MPASS(m == sndptr);
		mbufq_enqueue(&toep->ulp_pdu_reclaimq, m);

		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}

		tp->snd_nxt += adjusted_plen;
		tp->snd_max += adjusted_plen;

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	}

	/* Send a FIN if requested, but only if there are no more PDUs to send */
	if (mbufq_first(pduq) == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}

int
t4_tod_output(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	if (toep->ulp_mode == ULP_MODE_ISCSI)
		t4_push_pdus(sc, toep, 0);
	else
		t4_push_frames(sc, toep, 0);

	return (0);
}

int
t4_send_fin(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	toep->flags |= TPF_SEND_FIN;
	if (tp->t_state >= TCPS_ESTABLISHED) {
		if (toep->ulp_mode == ULP_MODE_ISCSI)
			t4_push_pdus(sc, toep, 0);
		else
			t4_push_frames(sc, toep, 0);
	}

	return (0);
}

int
t4_send_rst(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#if defined(INVARIANTS)
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	/* hmmmm */
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc for tid %u [%s] not sent already",
	    __func__, toep->tid, tcpstates[tp->t_state]));

	send_reset(sc, toep, 0);
	return (0);
}

/*
 * Peer has sent us a FIN.
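 * Advance rcv_nxt over the FIN, mark the socket as unable to receive any more
 * data, and make the state transition that the FIN calls for.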
 */
static int
do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_peer_close *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so;
	struct sockbuf *sb;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_PEER_CLOSE,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (__predict_false(toep->flags & TPF_SYNQE)) {
#ifdef INVARIANTS
		struct synq_entry *synqe = (void *)toep;

		INP_WLOCK(synqe->lctx->inp);
		if (synqe->flags & TPF_SYNQE_HAS_L2TE) {
			KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN,
			    ("%s: listen socket closed but tid %u not aborted.",
			    __func__, tid));
		} else {
			/*
			 * do_pass_accept_req is still running and will
			 * eventually take care of this tid.
			 */
		}
		INP_WUNLOCK(synqe->lctx->inp);
#endif
		CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
		    toep, toep->flags);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_INFO_RLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__,
	    tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	tp->rcv_nxt++;	/* FIN */

	so = inp->inp_socket;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	if (__predict_false(toep->ddp_flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) {
		handle_ddp_close(toep, tp, sb, cpl->rcv_nxt);
	}
	socantrcvmore_locked(so);	/* unlocks the sockbuf */

	if (toep->ulp_mode != ULP_MODE_RDMA) {
		KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt),
		    ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt,
		    be32toh(cpl->rcv_nxt)));
	}

	switch (tp->t_state) {
	case TCPS_SYN_RECEIVED:
		tp->t_starttime = ticks;
		/* FALLTHROUGH */

	case TCPS_ESTABLISHED:
		tp->t_state = TCPS_CLOSE_WAIT;
		break;

	case TCPS_FIN_WAIT_1:
		tp->t_state = TCPS_CLOSING;
		break;

	case TCPS_FIN_WAIT_2:
		tcp_twstart(tp);
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the inp */
		INP_INFO_RUNLOCK(&V_tcbinfo);

		INP_WLOCK(inp);
		final_cpl_received(toep);
		return (0);

	default:
		log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n",
		    __func__, tid, tp->t_state);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_RUNLOCK(&V_tcbinfo);
	return (0);
}

/*
 * Peer has ACK'd our FIN.
 */
static int
do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_CLOSE_CON_RPL,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_INFO_RLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	so = inp->inp_socket;
	tp->snd_una = be32toh(cpl->snd_nxt) - 1;	/* exclude FIN */

	switch (tp->t_state) {
	case TCPS_CLOSING:	/* see TCPS_FIN_WAIT_2 in do_peer_close too */
		tcp_twstart(tp);
release:
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the inp */
		INP_INFO_RUNLOCK(&V_tcbinfo);

		INP_WLOCK(inp);
		final_cpl_received(toep);	/* no more CPLs expected */

		return (0);
	case TCPS_LAST_ACK:
		if (tcp_close(tp))
			INP_WUNLOCK(inp);
		goto release;

	case TCPS_FIN_WAIT_1:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			soisdisconnected(so);
		tp->t_state = TCPS_FIN_WAIT_2;
		break;

	default:
		log(LOG_ERR,
		    "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n",
		    __func__, tid, tcpstates[tp->t_state]);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_RUNLOCK(&V_tcbinfo);
	return (0);
}

void
send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid,
    int rst_status)
{
	struct wrqe *wr;
	struct cpl_abort_rpl *cpl;

	wr = alloc_wrqe(sizeof(*cpl), ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	cpl = wrtod(wr);

	INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid);
	cpl->cmd = rst_status;

	t4_wrq_tx(sc, wr);
}

static int
abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN:
	case CPL_ERR_CONN_RESET:
		return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET);
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return (ETIMEDOUT);
	default:
		return (EIO);
	}
}

/*
 * TCP RST from the peer, timeout, or some other such critical error.
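 * The T4 is owed a CPL_ABORT_RPL in response no matter what, so one is sent
 * at the end even if the connection was already being torn down.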
 */
static int
do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct sge_wrq *ofld_txq = toep->ofld_txq;
	struct inpcb *inp;
	struct tcpcb *tp;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_REQ_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (toep->flags & TPF_SYNQE)
		return (do_abort_req_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	if (negative_advice(cpl->status)) {
		CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)",
		    __func__, cpl->status, tid, toep->flags);
		return (0);	/* Ignore negative advice */
	}

	inp = toep->inp;
	INP_INFO_RLOCK(&V_tcbinfo);	/* for tcp_close */
	INP_WLOCK(inp);

	tp = intotcpcb(inp);

	CTR6(KTR_CXGBE,
	    "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags,
	    inp->inp_flags, cpl->status);

	/*
	 * If we'd initiated an abort earlier the reply to it is responsible for
	 * cleaning up resources.  Otherwise we tear everything down right here
	 * right now.  We owe the T4 a CPL_ABORT_RPL no matter what.
	 */
	if (toep->flags & TPF_ABORT_SHUTDOWN) {
		INP_WUNLOCK(inp);
		goto done;
	}
	toep->flags |= TPF_ABORT_SHUTDOWN;

	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)
			so_error_set(so, abort_status_to_errno(tp,
			    cpl->status));
		tp = tcp_close(tp);
		if (tp == NULL)
			INP_WLOCK(inp);	/* re-acquire */
	}

	final_cpl_received(toep);
done:
	INP_INFO_RUNLOCK(&V_tcbinfo);
	send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST);
	return (0);
}

/*
 * Reply to the CPL_ABORT_REQ (send_reset)
 */
static int
do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_RPL_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (toep->flags & TPF_SYNQE)
		return (do_abort_rpl_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d",
	    __func__, tid, toep, inp, cpl->status);

	KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
	    ("%s: wasn't expecting abort reply", __func__));

	INP_WLOCK(inp);
	final_cpl_received(toep);

	return (0);
}

static int
do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_data *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	int len;
	uint32_t ddp_placed = 0;

	if (__predict_false(toep->flags & TPF_SYNQE)) {
#ifdef INVARIANTS
		struct synq_entry *synqe = (void *)toep;

		INP_WLOCK(synqe->lctx->inp);
		if (synqe->flags & TPF_SYNQE_HAS_L2TE) {
			KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN,
			    ("%s: listen socket closed but tid %u not aborted.",
			    __func__, tid));
		} else {
			/*
			 * do_pass_accept_req is still running and will
			 * eventually take care of this tid.
			 */
		}
		INP_WUNLOCK(synqe->lctx->inp);
#endif
		CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
		    toep, toep->flags);
		m_freem(m);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
	len = m->m_pkthdr.len;

	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	tp = intotcpcb(inp);

	if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq)))
		ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt;

	tp->rcv_nxt += len;
	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
	tp->rcv_wnd -= len;
	tp->t_rcvtime = ticks;

	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
		CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
		    __func__, tid, len);
		m_freem(m);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);

		INP_INFO_RLOCK(&V_tcbinfo);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp)
			INP_WUNLOCK(inp);
		INP_INFO_RUNLOCK(&V_tcbinfo);

		return (0);
	}

	/* receive buffer autosize */
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    len > (sbspace(sb) / 8 * 7)) {
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(sb, newsize, so, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
		else
			toep->rx_credits += newsize - hiwat;
	}

	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		int changed = !(toep->ddp_flags & DDP_ON) ^ cpl->ddp_off;

		if (changed) {
			if (toep->ddp_flags & DDP_SC_REQ)
				toep->ddp_flags ^= DDP_ON | DDP_SC_REQ;
			else {
				KASSERT(cpl->ddp_off == 1,
				    ("%s: DDP switched on by itself.",
				    __func__));

				/* Fell out of DDP mode */
				toep->ddp_flags &= ~(DDP_ON | DDP_BUF0_ACTIVE |
				    DDP_BUF1_ACTIVE);

				if (ddp_placed)
					insert_ddp_data(toep, ddp_placed);
			}
		}

		if ((toep->ddp_flags & DDP_OK) == 0 &&
		    time_uptime >= toep->ddp_disabled + DDP_RETRY_WAIT) {
			toep->ddp_score = DDP_LOW_SCORE;
			toep->ddp_flags |= DDP_OK;
			CTR3(KTR_CXGBE, "%s: tid %u DDP_OK @ %u",
			    __func__, tid, time_uptime);
		}

		if (toep->ddp_flags & DDP_ON) {

			/*
			 * CPL_RX_DATA with DDP on can only be an indicate.  Ask
			 * soreceive to post a buffer or disable DDP.  The
			 * payload that arrived in this indicate is appended to
			 * the socket buffer as usual.
			 */

#if 0
			CTR5(KTR_CXGBE,
			    "%s: tid %u (0x%x) DDP indicate (seq 0x%x, len %d)",
			    __func__, tid, toep->flags, be32toh(cpl->seq), len);
#endif
			sb->sb_flags |= SB_DDP_INDICATE;
		} else if ((toep->ddp_flags & (DDP_OK|DDP_SC_REQ)) == DDP_OK &&
		    tp->rcv_wnd > DDP_RSVD_WIN && len >= sc->tt.ddp_thres) {

			/*
			 * DDP allowed but isn't on (and a request to switch it
			 * on isn't pending either), and conditions are ripe for
			 * it to work.  Switch it on.
			 */

			enable_ddp(sc, toep);
		}
	}

	KASSERT(toep->sb_cc >= sbused(sb),
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sbused(sb), toep->sb_cc));
	toep->rx_credits += toep->sb_cc - sbused(sb);
	sbappendstream_locked(sb, m, 0);
	toep->sb_cc = sbused(sb);
	if (toep->rx_credits > 0 && toep->sb_cc + tp->rcv_wnd < sb->sb_lowat) {
		int credits;

		credits = send_rx_credits(sc, toep, toep->rx_credits);
		toep->rx_credits -= credits;
		tp->rcv_wnd += credits;
		tp->rcv_adv += credits;
	}
	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);

	INP_WUNLOCK(inp);
	return (0);
}

#define S_CPL_FW4_ACK_OPCODE    24
#define M_CPL_FW4_ACK_OPCODE    0xff
#define V_CPL_FW4_ACK_OPCODE(x) ((x) << S_CPL_FW4_ACK_OPCODE)
#define G_CPL_FW4_ACK_OPCODE(x) \
    (((x) >> S_CPL_FW4_ACK_OPCODE) & M_CPL_FW4_ACK_OPCODE)

#define S_CPL_FW4_ACK_FLOWID    0
#define M_CPL_FW4_ACK_FLOWID    0xffffff
#define V_CPL_FW4_ACK_FLOWID(x) ((x) << S_CPL_FW4_ACK_FLOWID)
#define G_CPL_FW4_ACK_FLOWID(x) \
    (((x) >> S_CPL_FW4_ACK_FLOWID) & M_CPL_FW4_ACK_FLOWID)

#define S_CPL_FW4_ACK_CR        24
#define M_CPL_FW4_ACK_CR        0xff
#define V_CPL_FW4_ACK_CR(x)     ((x) << S_CPL_FW4_ACK_CR)
#define G_CPL_FW4_ACK_CR(x)     (((x) >> S_CPL_FW4_ACK_CR) & M_CPL_FW4_ACK_CR)

#define S_CPL_FW4_ACK_SEQVAL    0
#define M_CPL_FW4_ACK_SEQVAL    0x1
#define V_CPL_FW4_ACK_SEQVAL(x) ((x) << S_CPL_FW4_ACK_SEQVAL)
#define G_CPL_FW4_ACK_SEQVAL(x) \
    (((x) >> S_CPL_FW4_ACK_SEQVAL) & M_CPL_FW4_ACK_SEQVAL)
#define F_CPL_FW4_ACK_SEQVAL    V_CPL_FW4_ACK_SEQVAL(1U)

static int
do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp;
	struct tcpcb *tp;
	struct socket *so;
	uint8_t credits = cpl->credits;
	struct ofld_tx_sdesc *txsd;
	int plen;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	/*
	 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and
	 * now this comes back carrying the credits for the flowc.
	 */
	if (__predict_false(toep->flags & TPF_SYNQE)) {
		KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
		    ("%s: credits for a synq entry %p", __func__, toep));
		return (0);
	}

	inp = toep->inp;

	KASSERT(opcode == CPL_FW4_ACK,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_WLOCK(inp);

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) {
		INP_WUNLOCK(inp);
		return (0);
	}

	KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0,
	    ("%s: inp_flags 0x%x", __func__, inp->inp_flags));

	tp = intotcpcb(inp);

	if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) {
		tcp_seq snd_una = be32toh(cpl->snd_una);

#ifdef INVARIANTS
		if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) {
			log(LOG_ERR,
			    "%s: unexpected seq# %x for TID %u, snd_una %x\n",
			    __func__, snd_una, toep->tid, tp->snd_una);
		}
#endif

		if (tp->snd_una != snd_una) {
			tp->snd_una = snd_una;
			tp->ts_recent_age = tcp_ts_getticks();
		}
	}

	so = inp->inp_socket;
	txsd = &toep->txsd[toep->txsd_cidx];
	plen = 0;
	while (credits) {
		KASSERT(credits >= txsd->tx_credits,
		    ("%s: too many (or partial) credits", __func__));
		credits -= txsd->tx_credits;
		toep->tx_credits += txsd->tx_credits;
		plen += txsd->plen;
		txsd++;
		toep->txsd_avail++;
		KASSERT(toep->txsd_avail <= toep->txsd_total,
		    ("%s: txsd avail > total", __func__));
		if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) {
			txsd = &toep->txsd[0];
			toep->txsd_cidx = 0;
		}
	}

	if (toep->tx_credits == toep->tx_total) {
		toep->tx_nocompl = 0;
		toep->plen_nocompl = 0;
	}

	if (toep->flags & TPF_TX_SUSPENDED &&
	    toep->tx_credits >= toep->tx_total / 4) {
		toep->flags &= ~TPF_TX_SUSPENDED;
		if (toep->ulp_mode == ULP_MODE_ISCSI)
			t4_push_pdus(sc, toep, plen);
		else
			t4_push_frames(sc, toep, plen);
	} else if (plen > 0) {
		struct sockbuf *sb = &so->so_snd;
		int sbu;

		SOCKBUF_LOCK(sb);
		sbu = sbused(sb);
		if (toep->ulp_mode == ULP_MODE_ISCSI) {

			if (__predict_false(sbu > 0)) {
				/*
				 * The data transmitted before the tid's ULP
				 * mode changed to ISCSI is still in so_snd.
				 * Incoming credits should account for so_snd
				 * first.
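				 * Whatever is left over after so_snd is
				 * drained reclaims completed PDUs from the
				 * ulp_pdu_reclaimq below.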
				 */
				sbdrop_locked(sb, min(sbu, plen));
				plen -= min(sbu, plen);
			}
			sowwakeup_locked(so);	/* unlocks so_snd */
			rqdrop_locked(&toep->ulp_pdu_reclaimq, plen);
		} else {
			sbdrop_locked(sb, plen);
			sowwakeup_locked(so);	/* unlocks so_snd */
		}
		SOCKBUF_UNLOCK_ASSERT(sb);
	}

	INP_WUNLOCK(inp);

	return (0);
}

static int
do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_SET_TCB_RPL,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (is_ftid(sc, tid))
		return (t4_filter_rpl(iq, rss, m));	/* TCB is a filter */

	/*
	 * TOM and/or other ULPs don't request replies for CPL_SET_TCB or
	 * CPL_SET_TCB_FIELD requests.  This can easily change and when it does
	 * the dispatch code will go here.
	 */
#ifdef INVARIANTS
	panic("%s: Unexpected CPL_SET_TCB_RPL for tid %u on iq %p", __func__,
	    tid, iq);
#else
	log(LOG_ERR, "%s: Unexpected CPL_SET_TCB_RPL for tid %u on iq %p\n",
	    __func__, tid, iq);
#endif

	return (0);
}

void
t4_set_tcb_field(struct adapter *sc, struct toepcb *toep, int ctrl,
    uint16_t word, uint64_t mask, uint64_t val)
{
	struct wrqe *wr;
	struct cpl_set_tcb_field *req;

	wr = alloc_wrqe(sizeof(*req), ctrl ? toep->ctrlq : toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	t4_wrq_tx(sc, wr);
}

void
t4_init_cpl_io_handlers(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_PEER_CLOSE, do_peer_close);
	t4_register_cpl_handler(sc, CPL_CLOSE_CON_RPL, do_close_con_rpl);
	t4_register_cpl_handler(sc, CPL_ABORT_REQ_RSS, do_abort_req);
	t4_register_cpl_handler(sc, CPL_ABORT_RPL_RSS, do_abort_rpl);
	t4_register_cpl_handler(sc, CPL_RX_DATA, do_rx_data);
	t4_register_cpl_handler(sc, CPL_FW4_ACK, do_fw4_ack);
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, do_set_tcb_rpl);
}

void
t4_uninit_cpl_io_handlers(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
}
#endif