/*-
 * Copyright (c) 2012, 2015 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sglist.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/toecore.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

VNET_DECLARE(int, tcp_do_autosndbuf);
#define V_tcp_do_autosndbuf VNET(tcp_do_autosndbuf)
VNET_DECLARE(int, tcp_autosndbuf_inc);
#define V_tcp_autosndbuf_inc VNET(tcp_autosndbuf_inc)
VNET_DECLARE(int, tcp_autosndbuf_max);
#define V_tcp_autosndbuf_max VNET(tcp_autosndbuf_max)
VNET_DECLARE(int, tcp_do_autorcvbuf);
#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
VNET_DECLARE(int, tcp_autorcvbuf_inc);
#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
VNET_DECLARE(int, tcp_autorcvbuf_max);
#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)
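/*
 * Per-flow (FLOWC) work request.  This carries the connection's initial
 * parameters (PF/VF, tx channel, rx queue, and optionally the sequence
 * numbers, send buffer size, and MSS) to the firmware.  As the KASSERTs
 * on TPF_FLOWC_WR_SENT throughout this file indicate, it must be the
 * first work request posted on a tid.
 */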
void
send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
{
	struct wrqe *wr;
	struct fw_flowc_wr *flowc;
	unsigned int nparams = ftxp ? 8 : 6, flowclen;
	struct vi_info *vi = toep->vi;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	unsigned int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT),
	    ("%s: flowc for tid %u sent already", __func__, toep->tid));

	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);

	wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	flowc = wrtod(wr);
	memset(flowc, 0, wr->wr_len);

	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(nparams));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
	    V_FW_WR_FLOWID(toep->tid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htobe32(pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htobe32(pi->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htobe32(pi->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htobe32(toep->ofld_rxq->iq.abs_id);
	if (ftxp) {
		uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf);

		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
		flowc->mnemval[4].val = htobe32(ftxp->snd_nxt);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
		flowc->mnemval[5].val = htobe32(ftxp->rcv_nxt);
		flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[6].val = htobe32(sndbuf);
		flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[7].val = htobe32(ftxp->mss);

		CTR6(KTR_CXGBE,
		    "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x",
		    __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt,
		    ftxp->rcv_nxt);
	} else {
		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[4].val = htobe32(512);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[5].val = htobe32(512);

		CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);
	}

	txsd->tx_credits = howmany(flowclen, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	toep->flags |= TPF_FLOWC_WR_SENT;
	t4_wrq_tx(sc, wr);
}
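/*
 * A rough sketch of the tx credit arithmetic used above: every 16 bytes
 * of work request costs one credit, i.e. howmany(len, 16).  Assuming the
 * usual 8-byte fw_flowc_wr header and 8-byte fw_flowc_mnemval entries,
 * the 8-param flowc is 8 + 8 * 8 = 72 bytes, so howmany(72, 16) = 5
 * credits are charged to the txsd (the wrqe itself is padded to 80).
 */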
167 " (abort already in progress)" : ""); 168 169 if (toep->flags & TPF_ABORT_SHUTDOWN) 170 return; /* abort already in progress */ 171 172 toep->flags |= TPF_ABORT_SHUTDOWN; 173 174 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 175 ("%s: flowc_wr not sent for tid %d.", __func__, tid)); 176 177 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 178 if (wr == NULL) { 179 /* XXX */ 180 panic("%s: allocation failure.", __func__); 181 } 182 req = wrtod(wr); 183 184 INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid); 185 if (inp->inp_flags & INP_DROPPED) 186 req->rsvd0 = htobe32(snd_nxt); 187 else 188 req->rsvd0 = htobe32(tp->snd_nxt); 189 req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT); 190 req->cmd = CPL_ABORT_SEND_RST; 191 192 /* 193 * XXX: What's the correct way to tell that the inp hasn't been detached 194 * from its socket? Should I even be flushing the snd buffer here? 195 */ 196 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 197 struct socket *so = inp->inp_socket; 198 199 if (so != NULL) /* because I'm not sure. See comment above */ 200 sbflush(&so->so_snd); 201 } 202 203 t4_l2t_send(sc, wr, toep->l2te); 204 } 205 206 /* 207 * Called when a connection is established to translate the TCP options 208 * reported by HW to FreeBSD's native format. 209 */ 210 static void 211 assign_rxopt(struct tcpcb *tp, unsigned int opt) 212 { 213 struct toepcb *toep = tp->t_toe; 214 struct inpcb *inp = tp->t_inpcb; 215 struct adapter *sc = td_adapter(toep->td); 216 int n; 217 218 INP_LOCK_ASSERT(inp); 219 220 if (inp->inp_inc.inc_flags & INC_ISIPV6) 221 n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 222 else 223 n = sizeof(struct ip) + sizeof(struct tcphdr); 224 tp->t_maxseg = tp->t_maxopd = sc->params.mtus[G_TCPOPT_MSS(opt)] - n; 225 226 CTR4(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u)", __func__, toep->tid, 227 G_TCPOPT_MSS(opt), sc->params.mtus[G_TCPOPT_MSS(opt)]); 228 229 if (G_TCPOPT_TSTAMP(opt)) { 230 tp->t_flags |= TF_RCVD_TSTMP; /* timestamps ok */ 231 tp->ts_recent = 0; /* hmmm */ 232 tp->ts_recent_age = tcp_ts_getticks(); 233 tp->t_maxseg -= TCPOLEN_TSTAMP_APPA; 234 } 235 236 if (G_TCPOPT_SACK(opt)) 237 tp->t_flags |= TF_SACK_PERMIT; /* should already be set */ 238 else 239 tp->t_flags &= ~TF_SACK_PERMIT; /* sack disallowed by peer */ 240 241 if (G_TCPOPT_WSCALE_OK(opt)) 242 tp->t_flags |= TF_RCVD_SCALE; 243 244 /* Doing window scaling? */ 245 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 246 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 247 tp->rcv_scale = tp->request_r_scale; 248 tp->snd_scale = G_TCPOPT_SND_WSCALE(opt); 249 } 250 } 251 252 /* 253 * Completes some final bits of initialization for just established connections 254 * and changes their state to TCPS_ESTABLISHED. 255 * 256 * The ISNs are from after the exchange of SYNs. i.e., the true ISN + 1. 
/*
 * Completes some final bits of initialization for just established connections
 * and changes their state to TCPS_ESTABLISHED.
 *
 * The ISNs are from after the exchange of SYNs.  i.e., the true ISN + 1.
 */
void
make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn,
    uint16_t opt)
{
	struct inpcb *inp = toep->inp;
	struct socket *so = inp->inp_socket;
	struct tcpcb *tp = intotcpcb(inp);
	long bufsize;
	uint32_t iss = be32toh(snd_isn) - 1;	/* true ISS */
	uint32_t irs = be32toh(rcv_isn) - 1;	/* true IRS */
	uint16_t tcpopt = be16toh(opt);
	struct flowc_tx_params ftxp;

	INP_WLOCK_ASSERT(inp);
	KASSERT(tp->t_state == TCPS_SYN_SENT ||
	    tp->t_state == TCPS_SYN_RECEIVED,
	    ("%s: TCP state %s", __func__, tcpstates[tp->t_state]));

	CTR4(KTR_CXGBE, "%s: tid %d, toep %p, inp %p",
	    __func__, toep->tid, toep, inp);

	tp->t_state = TCPS_ESTABLISHED;
	tp->t_starttime = ticks;
	TCPSTAT_INC(tcps_connects);

	tp->irs = irs;
	tcp_rcvseqinit(tp);
	tp->rcv_wnd = toep->rx_credits << 10;
	tp->rcv_adv += tp->rcv_wnd;
	tp->last_ack_sent = tp->rcv_nxt;

	/*
	 * If we were unable to send all rx credits via opt0, save the remainder
	 * in rx_credits so that they can be handed over with the next credit
	 * update.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	bufsize = select_rcv_wnd(so);
	SOCKBUF_UNLOCK(&so->so_rcv);
	toep->rx_credits = bufsize - tp->rcv_wnd;

	tp->iss = iss;
	tcp_sendseqinit(tp);
	tp->snd_una = iss + 1;
	tp->snd_nxt = iss + 1;
	tp->snd_max = iss + 1;

	assign_rxopt(tp, tcpopt);

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf)
		bufsize = V_tcp_autosndbuf_max;
	else
		bufsize = sbspace(&so->so_snd);
	SOCKBUF_UNLOCK(&so->so_snd);

	ftxp.snd_nxt = tp->snd_nxt;
	ftxp.rcv_nxt = tp->rcv_nxt;
	ftxp.snd_space = bufsize;
	ftxp.mss = tp->t_maxseg;
	send_flowc_wr(toep, &ftxp);

	soisconnected(so);
}
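/*
 * Rx credit bookkeeping, as make_established above suggests: the window
 * advertised to the chip via opt0 is toep->rx_credits << 10 (rx_credits
 * is presumably in 1KB units at that point), and whatever part of the
 * receive buffer didn't fit in opt0 is carried in toep->rx_credits until
 * it can be returned to the chip with a CPL_RX_DATA_ACK (below).
 */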
static int
send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits)
{
	struct wrqe *wr;
	struct cpl_rx_data_ack *req;
	uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	KASSERT(credits >= 0, ("%s: %d credits", __func__, credits));

	wr = alloc_wrqe(sizeof(*req), toep->ctrlq);
	if (wr == NULL)
		return (0);
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid);
	req->credit_dack = htobe32(dack | V_RX_CREDITS(credits));

	t4_wrq_tx(sc, wr);
	return (credits);
}

void
t4_rcvd(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_rcv;
	struct toepcb *toep = tp->t_toe;
	int credits;

	INP_WLOCK_ASSERT(inp);

	SOCKBUF_LOCK(sb);
	KASSERT(toep->sb_cc >= sbused(sb),
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sbused(sb), toep->sb_cc));

	toep->rx_credits += toep->sb_cc - sbused(sb);
	toep->sb_cc = sbused(sb);

	if (toep->rx_credits > 0 &&
	    (tp->rcv_wnd <= 32 * 1024 || toep->rx_credits >= 64 * 1024 ||
	    (toep->rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) ||
	    toep->sb_cc + tp->rcv_wnd < sb->sb_lowat)) {

		credits = send_rx_credits(sc, toep, toep->rx_credits);
		toep->rx_credits -= credits;
		tp->rcv_wnd += credits;
		tp->rcv_adv += credits;
	}
	SOCKBUF_UNLOCK(sb);
}

/*
 * Close a connection by sending a CPL_CLOSE_CON_REQ message.
 */
static int
close_conn(struct adapter *sc, struct toepcb *toep)
{
	struct wrqe *wr;
	struct cpl_close_con_req *req;
	unsigned int tid = toep->tid;

	CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid,
	    toep->flags & TPF_FIN_SENT ? ", IGNORED" : "");

	if (toep->flags & TPF_FIN_SENT)
		return (0);

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) |
	    V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr)));
	req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) |
	    V_FW_WR_FLOWID(tid));
	req->wr.wr_lo = cpu_to_be64(0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	toep->flags |= TPF_FIN_SENT;
	toep->flags &= ~TPF_SEND_FIN;
	t4_l2t_send(sc, wr, toep->l2te);

	return (0);
}

#define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
#define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16))

/* Maximum amount of immediate data we could stuff in a WR */
static inline int
max_imm_payload(int tx_credits)
{
	const int n = 2;	/* Use only up to 2 desc for imm. data WR */

	KASSERT(tx_credits >= 0 &&
	    tx_credits <= MAX_OFLD_TX_CREDITS,
	    ("%s: %d credits", __func__, tx_credits));

	if (tx_credits < MIN_OFLD_TX_CREDITS)
		return (0);

	if (tx_credits >= (n * EQ_ESIZE) / 16)
		return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr));
	else
		return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr));
}

/* Maximum number of SGL entries we could stuff in a WR */
static inline int
max_dsgl_nsegs(int tx_credits)
{
	int nseg = 1;	/* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */
	int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS;

	KASSERT(tx_credits >= 0 &&
	    tx_credits <= MAX_OFLD_TX_CREDITS,
	    ("%s: %d credits", __func__, tx_credits));

	if (tx_credits < MIN_OFLD_TX_CREDITS)
		return (0);

	nseg += 2 * (sge_pair_credits * 16 / 24);
	if ((sge_pair_credits * 16) % 24 == 16)
		nseg++;

	return (nseg);
}
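/*
 * A sketch of the max_dsgl_nsegs arithmetic, on the assumption that a
 * ulptx_sge_pair is 24 bytes and describes 2 segments: the ulptx_sgl
 * header embeds the first segment, every full 24 bytes of the remaining
 * credit space adds 2 more, and a 16-byte remainder fits the
 * address/length of exactly one more.  E.g. sge_pair_credits = 5 gives
 * 5 * 16 = 80 = 3 * 24 + 8 bytes, so nseg = 1 + 3 * 2 = 7.
 */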
static inline void
write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen,
    unsigned int plen, uint8_t credits, int shove, int ulp_submode, int txalign)
{
	struct fw_ofld_tx_data_wr *txwr = dst;

	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) |
	    V_FW_WR_IMMDLEN(immdlen));
	txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) |
	    V_FW_WR_LEN16(credits));
	txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(toep->ulp_mode) |
	    V_TX_ULP_SUBMODE(ulp_submode) | V_TX_URG(0) | V_TX_SHOVE(shove));
	txwr->plen = htobe32(plen);

	if (txalign > 0) {
		struct tcpcb *tp = intotcpcb(toep->inp);

		if (plen < 2 * tp->t_maxseg || is_10G_port(toep->vi->pi))
			txwr->lsodisable_to_flags |=
			    htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE);
		else
			txwr->lsodisable_to_flags |=
			    htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD |
				(tp->t_flags & TF_NODELAY ? 0 :
				F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE));
	}
}

/*
 * Generate a DSGL from a starting mbuf.  The total number of segments and the
 * maximum segments in any one mbuf are provided.
 */
static void
write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n)
{
	struct mbuf *m;
	struct ulptx_sgl *usgl = dst;
	int i, j, rc;
	struct sglist sg;
	struct sglist_seg segs[n];

	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

	sglist_init(&sg, n, segs);
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));

	i = -1;
	for (m = start; m != stop; m = m->m_next) {
		rc = sglist_append(&sg, mtod(m, void *), m->m_len);
		if (__predict_false(rc != 0))
			panic("%s: sglist_append %d", __func__, rc);

		for (j = 0; j < sg.sg_nseg; i++, j++) {
			if (i < 0) {
				usgl->len0 = htobe32(segs[j].ss_len);
				usgl->addr0 = htobe64(segs[j].ss_paddr);
			} else {
				usgl->sge[i / 2].len[i & 1] =
				    htobe32(segs[j].ss_len);
				usgl->sge[i / 2].addr[i & 1] =
				    htobe64(segs[j].ss_paddr);
			}
#ifdef INVARIANTS
			nsegs--;
#endif
		}
		sglist_reset(&sg);
	}
	if (i & 1)
		usgl->sge[i / 2].len[1] = htobe32(0);
	KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p",
	    __func__, nsegs, start, stop));
}

/*
 * Max number of SGL entries an offload tx work request can have.  This is 41
 * (1 + 40) for a full 512B work request.
 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40)
 */
#define OFLD_SGL_LEN (41)
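/*
 * Checking that figure against the wr_len formula used by the tx
 * routines below: a DSGL work request carries one segment in the
 * ulptx_sgl and the other nsegs - 1 in sge pairs, for a length of
 * sizeof(fw_ofld_tx_data_wr) + sizeof(ulptx_sgl) +
 * ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8.  With nsegs = 41
 * that is 16 + 16 + 480 = 512 bytes, exactly the full 512B WR above.
 */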
/*
 * Send data and/or a FIN to the peer.
 *
 * The socket's so_snd buffer consists of a stream of data starting with sb_mb
 * and linked together with m_next.  sb_sndptr, if set, is the last mbuf that
 * was transmitted.
 *
 * drop indicates the number of bytes that should be dropped from the head of
 * the send buffer.  It is an optimization that lets do_fw4_ack avoid creating
 * contention on the send buffer lock (before this change it used to do
 * sowwakeup and then t4_push_frames right after that when recovering from tx
 * stalls).  When drop is set this function MUST drop the bytes and wake up any
 * writers.
 */
void
t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m, *sb_sndptr;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_snd;
	int tx_credits, shove, compl, space, sowwakeup;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
	    toep->ulp_mode == ULP_MODE_TCPDDP ||
	    toep->ulp_mode == ULP_MODE_RDMA,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	do {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		SOCKBUF_LOCK(sb);
		sowwakeup = drop;
		if (drop) {
			sbdrop_locked(sb, drop);
			drop = 0;
		}
		sb_sndptr = sb->sb_sndptr;
		sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb;
		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/* This mbuf sent us _over_ the nsegs limit, back out */
			if (plen > max_imm && nsegs > max_nsegs) {
				nsegs -= n;
				plen -= m->m_len;
				if (plen == 0) {
					/* Too few credits */
					toep->flags |= TPF_TX_SUSPENDED;
					if (sowwakeup)
						sowwakeup_locked(so);
					else
						SOCKBUF_UNLOCK(sb);
					SOCKBUF_UNLOCK_ASSERT(sb);
					return;
				}
				break;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
			sb_sndptr = m;	/* new sb->sb_sndptr if all goes well */

			/* This mbuf put us right at the max_nsegs limit */
			if (plen > max_imm && nsegs == max_nsegs) {
				m = m->m_next;
				break;
			}
		}

		space = sbspace(sb);

		if (space <= sb->sb_hiwat * 3 / 8 &&
		    toep->plen_nocompl + plen >= sb->sb_hiwat / 4)
			compl = 1;
		else
			compl = 0;

		if (sb->sb_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf &&
		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
		    space < sb->sb_hiwat / 8) {
			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max);

			if (!sbreserve_locked(sb, newsize, so, NULL))
				sb->sb_flags &= ~SB_AUTOSIZE;
			else
				sowwakeup = 1;	/* room available */
		}
		if (sowwakeup)
			sowwakeup_locked(so);
		else
			SOCKBUF_UNLOCK(sb);
		SOCKBUF_UNLOCK_ASSERT(sb);

		/* nothing to send */
		if (plen == 0) {
			KASSERT(m == NULL,
			    ("%s: nothing to send, but m != NULL", __func__));
			break;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		shove = m == NULL && !(tp->t_flags & TF_MORETOCOME);
		if (plen <= max_imm) {

			/* Immediate data tx */

			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
			    toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, plen, credits, shove, 0,
			    sc->tt.tx_align);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
			nsegs = 0;
		} else {
			int wr_len;

			/* DSGL tx */

			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, plen, credits, shove, 0,
			    sc->tt.tx_align);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));

		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4)
			compl = 1;

		if (compl || toep->ulp_mode == ULP_MODE_RDMA) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}

		tp->snd_nxt += plen;
		tp->snd_max += plen;

		SOCKBUF_LOCK(sb);
		KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__));
		sb->sb_sndptr = sb_sndptr;
		SOCKBUF_UNLOCK(sb);

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	} while (m != NULL);

	/* Send a FIN if requested, but only if there's no more data to send */
	if (m == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}
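/*
 * A note on the compl/F_FW_WR_COMPL logic in t4_push_frames: instead of
 * asking the firmware for a CPL_FW4_ACK on every work request, the
 * driver requests one only once a sizable fraction of the credits or of
 * the send buffer is outstanding (the 3/8 and 1/4 thresholds above).
 * The credits then come back in batches via do_fw4_ack, which is
 * presumably what keeps the completion and interrupt rates down.
 */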
static inline void
rqdrop_locked(struct mbufq *q, int plen)
{
	struct mbuf *m;

	while (plen > 0) {
		m = mbufq_dequeue(q);

		/* Too many credits. */
		MPASS(m != NULL);
		M_ASSERTPKTHDR(m);

		/* Partial credits. */
		MPASS(plen >= m->m_pkthdr.len);

		plen -= m->m_pkthdr.len;
		m_freem(m);
	}
}
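/*
 * How the reclaim queue fits in, going by the code here: t4_push_pdus
 * below moves each PDU it hands to the hardware from ulp_pduq to
 * ulp_pdu_reclaimq, and rqdrop_locked frees PDUs from the head of the
 * reclaim queue once do_fw4_ack reports their payload acked (always a
 * whole number of PDUs, per the MPASSes above).
 */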
void
t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	u_int adjusted_plen, ulp_submode;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	int tx_credits, shove;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
	struct mbufq *pduq = &toep->ulp_pduq;
	static const u_int ulp_extra_len[] = {0, 4, 4, 8};

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
	KASSERT(toep->ulp_mode == ULP_MODE_ISCSI,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	if (drop)
		rqdrop_locked(&toep->ulp_pdu_reclaimq, drop);

	while ((sndptr = mbufq_first(pduq)) != NULL) {
		M_ASSERTPKTHDR(sndptr);

		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/*
			 * This mbuf would send us _over_ the nsegs limit.
			 * Suspend tx because the PDU can't be sent out.
			 */
			if (plen > max_imm && nsegs > max_nsegs) {
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		/*
		 * We have a PDU to send.  All of it goes out in one WR so 'm'
		 * is NULL.  A PDU's length is always a multiple of 4.
		 */
		MPASS(m == NULL);
		MPASS((plen & 3) == 0);
		MPASS(sndptr->m_pkthdr.len == plen);

		shove = !(tp->t_flags & TF_MORETOCOME);
		ulp_submode = mbuf_ulp_submode(sndptr);
		MPASS(ulp_submode < nitems(ulp_extra_len));

		/*
		 * plen doesn't include header and data digests, which are
		 * generated and inserted in the right places by the TOE, but
		 * they do occupy TCP sequence space and need to be accounted
		 * for.
		 */
		adjusted_plen = plen + ulp_extra_len[ulp_submode];
		if (plen <= max_imm) {

			/* Immediate data tx */

			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
			    toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, adjusted_plen, credits,
			    shove, ulp_submode, sc->tt.tx_align);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
			nsegs = 0;
		} else {
			int wr_len;

			/* DSGL tx */
			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, adjusted_plen, credits,
			    shove, ulp_submode, sc->tt.tx_align);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));

		m = mbufq_dequeue(pduq);
		MPASS(m == sndptr);
		mbufq_enqueue(&toep->ulp_pdu_reclaimq, m);

		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}

		tp->snd_nxt += adjusted_plen;
		tp->snd_max += adjusted_plen;

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	}

	/* Send a FIN if requested, but only if there are no more PDUs to send */
	if (mbufq_first(pduq) == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}
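/*
 * The next three functions are this driver's transmit-side TOE entry
 * points: the stack reaches them through the toedev method table
 * (tod_output, tod_send_fin, tod_send_rst) and they dispatch to
 * t4_push_pdus or t4_push_frames based on the tid's ULP mode, or hand a
 * RST off to send_reset.
 */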
int
t4_tod_output(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	if (toep->ulp_mode == ULP_MODE_ISCSI)
		t4_push_pdus(sc, toep, 0);
	else
		t4_push_frames(sc, toep, 0);

	return (0);
}

int
t4_send_fin(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	toep->flags |= TPF_SEND_FIN;
	if (tp->t_state >= TCPS_ESTABLISHED) {
		if (toep->ulp_mode == ULP_MODE_ISCSI)
			t4_push_pdus(sc, toep, 0);
		else
			t4_push_frames(sc, toep, 0);
	}

	return (0);
}

int
t4_send_rst(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#if defined(INVARIANTS)
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	/* hmmmm */
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc for tid %u [%s] not sent already",
	    __func__, toep->tid, tcpstates[tp->t_state]));

	send_reset(sc, toep, 0);
	return (0);
}
/*
 * Peer has sent us a FIN.
 */
static int
do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_peer_close *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so;
	struct sockbuf *sb;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_PEER_CLOSE,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (__predict_false(toep->flags & TPF_SYNQE)) {
#ifdef INVARIANTS
		struct synq_entry *synqe = (void *)toep;

		INP_WLOCK(synqe->lctx->inp);
		if (synqe->flags & TPF_SYNQE_HAS_L2TE) {
			KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN,
			    ("%s: listen socket closed but tid %u not aborted.",
			    __func__, tid));
		} else {
			/*
			 * do_pass_accept_req is still running and will
			 * eventually take care of this tid.
			 */
		}
		INP_WUNLOCK(synqe->lctx->inp);
#endif
		CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
		    toep, toep->flags);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_INFO_RLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__,
	    tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	tp->rcv_nxt++;	/* FIN */

	so = inp->inp_socket;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	if (__predict_false(toep->ddp_flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) {
		handle_ddp_close(toep, tp, sb, cpl->rcv_nxt);
	}
	socantrcvmore_locked(so);	/* unlocks the sockbuf */

	if (toep->ulp_mode != ULP_MODE_RDMA) {
		KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt),
		    ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt,
		    be32toh(cpl->rcv_nxt)));
	}

	switch (tp->t_state) {
	case TCPS_SYN_RECEIVED:
		tp->t_starttime = ticks;
		/* FALLTHROUGH */

	case TCPS_ESTABLISHED:
		tp->t_state = TCPS_CLOSE_WAIT;
		break;

	case TCPS_FIN_WAIT_1:
		tp->t_state = TCPS_CLOSING;
		break;

	case TCPS_FIN_WAIT_2:
		tcp_twstart(tp);
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the inp */
		INP_INFO_RUNLOCK(&V_tcbinfo);

		INP_WLOCK(inp);
		final_cpl_received(toep);
		return (0);

	default:
		log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n",
		    __func__, tid, tp->t_state);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_RUNLOCK(&V_tcbinfo);
	return (0);
}
/*
 * Peer has ACK'd our FIN.
 */
static int
do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_CLOSE_CON_RPL,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_INFO_RLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	so = inp->inp_socket;
	tp->snd_una = be32toh(cpl->snd_nxt) - 1;	/* exclude FIN */

	switch (tp->t_state) {
	case TCPS_CLOSING:	/* see TCPS_FIN_WAIT_2 in do_peer_close too */
		tcp_twstart(tp);
release:
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the inp */
		INP_INFO_RUNLOCK(&V_tcbinfo);

		INP_WLOCK(inp);
		final_cpl_received(toep);	/* no more CPLs expected */

		return (0);
	case TCPS_LAST_ACK:
		if (tcp_close(tp))
			INP_WUNLOCK(inp);
		goto release;

	case TCPS_FIN_WAIT_1:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			soisdisconnected(so);
		tp->t_state = TCPS_FIN_WAIT_2;
		break;

	default:
		log(LOG_ERR,
		    "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n",
		    __func__, tid, tcpstates[tp->t_state]);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_RUNLOCK(&V_tcbinfo);
	return (0);
}

void
send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid,
    int rst_status)
{
	struct wrqe *wr;
	struct cpl_abort_rpl *cpl;

	wr = alloc_wrqe(sizeof(*cpl), ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	cpl = wrtod(wr);

	INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid);
	cpl->cmd = rst_status;

	t4_wrq_tx(sc, wr);
}

static int
abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN:
	case CPL_ERR_CONN_RESET:
		return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET);
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return (ETIMEDOUT);
	default:
		return (EIO);
	}
}
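/*
 * Abort handling is two-sided.  When the driver initiates the abort
 * (send_reset above), the chip's CPL_ABORT_RPL_RSS finishes the teardown
 * (do_abort_rpl below).  When the peer or the chip initiates it,
 * do_abort_req tears the connection down and, as its comment notes, we
 * owe the T4 a CPL_ABORT_RPL no matter what.
 */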
/*
 * TCP RST from the peer, timeout, or some other such critical error.
 */
static int
do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct sge_wrq *ofld_txq = toep->ofld_txq;
	struct inpcb *inp;
	struct tcpcb *tp;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_REQ_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (toep->flags & TPF_SYNQE)
		return (do_abort_req_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	if (negative_advice(cpl->status)) {
		CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)",
		    __func__, cpl->status, tid, toep->flags);
		return (0);	/* Ignore negative advice */
	}

	inp = toep->inp;
	INP_INFO_RLOCK(&V_tcbinfo);	/* for tcp_close */
	INP_WLOCK(inp);

	tp = intotcpcb(inp);

	CTR6(KTR_CXGBE,
	    "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags,
	    inp->inp_flags, cpl->status);

	/*
	 * If we'd initiated an abort earlier the reply to it is responsible for
	 * cleaning up resources.  Otherwise we tear everything down right here
	 * right now.  We owe the T4 a CPL_ABORT_RPL no matter what.
	 */
	if (toep->flags & TPF_ABORT_SHUTDOWN) {
		INP_WUNLOCK(inp);
		goto done;
	}
	toep->flags |= TPF_ABORT_SHUTDOWN;

	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)
			so_error_set(so, abort_status_to_errno(tp,
			    cpl->status));
		tp = tcp_close(tp);
		if (tp == NULL)
			INP_WLOCK(inp);	/* re-acquire */
	}

	final_cpl_received(toep);
done:
	INP_INFO_RUNLOCK(&V_tcbinfo);
	send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST);
	return (0);
}

/*
 * Reply to the CPL_ABORT_REQ (send_reset)
 */
static int
do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_RPL_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (toep->flags & TPF_SYNQE)
		return (do_abort_rpl_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d",
	    __func__, tid, toep, inp, cpl->status);

	KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
	    ("%s: wasn't expecting abort reply", __func__));

	INP_WLOCK(inp);
	final_cpl_received(toep);

	return (0);
}
static int
do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_data *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	int len;
	uint32_t ddp_placed = 0;

	if (__predict_false(toep->flags & TPF_SYNQE)) {
#ifdef INVARIANTS
		struct synq_entry *synqe = (void *)toep;

		INP_WLOCK(synqe->lctx->inp);
		if (synqe->flags & TPF_SYNQE_HAS_L2TE) {
			KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN,
			    ("%s: listen socket closed but tid %u not aborted.",
			    __func__, tid));
		} else {
			/*
			 * do_pass_accept_req is still running and will
			 * eventually take care of this tid.
			 */
		}
		INP_WUNLOCK(synqe->lctx->inp);
#endif
		CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
		    toep, toep->flags);
		m_freem(m);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
	len = m->m_pkthdr.len;

	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	tp = intotcpcb(inp);

	if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq)))
		ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt;

	tp->rcv_nxt += len;
	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
	tp->rcv_wnd -= len;
	tp->t_rcvtime = ticks;

	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
		CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
		    __func__, tid, len);
		m_freem(m);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);

		INP_INFO_RLOCK(&V_tcbinfo);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp)
			INP_WUNLOCK(inp);
		INP_INFO_RUNLOCK(&V_tcbinfo);

		return (0);
	}

	/* receive buffer autosize */
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    len > (sbspace(sb) / 8 * 7)) {
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(sb, newsize, so, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
		else
			toep->rx_credits += newsize - hiwat;
	}

	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		int changed = !(toep->ddp_flags & DDP_ON) ^ cpl->ddp_off;

		if (changed) {
			if (toep->ddp_flags & DDP_SC_REQ)
				toep->ddp_flags ^= DDP_ON | DDP_SC_REQ;
			else {
				KASSERT(cpl->ddp_off == 1,
				    ("%s: DDP switched on by itself.",
				    __func__));

				/* Fell out of DDP mode */
				toep->ddp_flags &= ~(DDP_ON | DDP_BUF0_ACTIVE |
				    DDP_BUF1_ACTIVE);

				if (ddp_placed)
					insert_ddp_data(toep, ddp_placed);
			}
		}

		if ((toep->ddp_flags & DDP_OK) == 0 &&
		    time_uptime >= toep->ddp_disabled + DDP_RETRY_WAIT) {
			toep->ddp_score = DDP_LOW_SCORE;
			toep->ddp_flags |= DDP_OK;
			CTR3(KTR_CXGBE, "%s: tid %u DDP_OK @ %u",
			    __func__, tid, time_uptime);
		}

		if (toep->ddp_flags & DDP_ON) {

			/*
			 * CPL_RX_DATA with DDP on can only be an indicate.  Ask
			 * soreceive to post a buffer or disable DDP.  The
			 * payload that arrived in this indicate is appended to
			 * the socket buffer as usual.
			 */

#if 0
			CTR5(KTR_CXGBE,
			    "%s: tid %u (0x%x) DDP indicate (seq 0x%x, len %d)",
			    __func__, tid, toep->flags, be32toh(cpl->seq), len);
#endif
			sb->sb_flags |= SB_DDP_INDICATE;
		} else if ((toep->ddp_flags & (DDP_OK|DDP_SC_REQ)) == DDP_OK &&
		    tp->rcv_wnd > DDP_RSVD_WIN && len >= sc->tt.ddp_thres) {

			/*
			 * DDP allowed but isn't on (and a request to switch it
			 * on isn't pending either), and conditions are ripe for
			 * it to work.  Switch it on.
			 */

			enable_ddp(sc, toep);
		}
	}

	KASSERT(toep->sb_cc >= sbused(sb),
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sbused(sb), toep->sb_cc));
	toep->rx_credits += toep->sb_cc - sbused(sb);
	sbappendstream_locked(sb, m, 0);
	toep->sb_cc = sbused(sb);
	if (toep->rx_credits > 0 && toep->sb_cc + tp->rcv_wnd < sb->sb_lowat) {
		int credits;

		credits = send_rx_credits(sc, toep, toep->rx_credits);
		toep->rx_credits -= credits;
		tp->rcv_wnd += credits;
		tp->rcv_adv += credits;
	}
	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);

	INP_WUNLOCK(inp);
	return (0);
}
#define S_CPL_FW4_ACK_OPCODE    24
#define M_CPL_FW4_ACK_OPCODE    0xff
#define V_CPL_FW4_ACK_OPCODE(x) ((x) << S_CPL_FW4_ACK_OPCODE)
#define G_CPL_FW4_ACK_OPCODE(x) \
    (((x) >> S_CPL_FW4_ACK_OPCODE) & M_CPL_FW4_ACK_OPCODE)

#define S_CPL_FW4_ACK_FLOWID    0
#define M_CPL_FW4_ACK_FLOWID    0xffffff
#define V_CPL_FW4_ACK_FLOWID(x) ((x) << S_CPL_FW4_ACK_FLOWID)
#define G_CPL_FW4_ACK_FLOWID(x) \
    (((x) >> S_CPL_FW4_ACK_FLOWID) & M_CPL_FW4_ACK_FLOWID)

#define S_CPL_FW4_ACK_CR        24
#define M_CPL_FW4_ACK_CR        0xff
#define V_CPL_FW4_ACK_CR(x)     ((x) << S_CPL_FW4_ACK_CR)
#define G_CPL_FW4_ACK_CR(x)     (((x) >> S_CPL_FW4_ACK_CR) & M_CPL_FW4_ACK_CR)

#define S_CPL_FW4_ACK_SEQVAL    0
#define M_CPL_FW4_ACK_SEQVAL    0x1
#define V_CPL_FW4_ACK_SEQVAL(x) ((x) << S_CPL_FW4_ACK_SEQVAL)
#define G_CPL_FW4_ACK_SEQVAL(x) \
    (((x) >> S_CPL_FW4_ACK_SEQVAL) & M_CPL_FW4_ACK_SEQVAL)
#define F_CPL_FW4_ACK_SEQVAL    V_CPL_FW4_ACK_SEQVAL(1U)
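/*
 * do_fw4_ack below is where tx credits are returned.  The handler walks
 * the tx software descriptor ring from txsd_cidx, reclaiming each
 * descriptor's credits and payload length until the CPL's credit count
 * is consumed; the KASSERT in that loop relies on the firmware acking
 * only whole work requests, never a fraction of one.
 */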
static int
do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp;
	struct tcpcb *tp;
	struct socket *so;
	uint8_t credits = cpl->credits;
	struct ofld_tx_sdesc *txsd;
	int plen;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	/*
	 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and
	 * now this comes back carrying the credits for the flowc.
	 */
	if (__predict_false(toep->flags & TPF_SYNQE)) {
		KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
		    ("%s: credits for a synq entry %p", __func__, toep));
		return (0);
	}

	inp = toep->inp;

	KASSERT(opcode == CPL_FW4_ACK,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_WLOCK(inp);

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) {
		INP_WUNLOCK(inp);
		return (0);
	}

	KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0,
	    ("%s: inp_flags 0x%x", __func__, inp->inp_flags));

	tp = intotcpcb(inp);

	if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) {
		tcp_seq snd_una = be32toh(cpl->snd_una);

#ifdef INVARIANTS
		if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) {
			log(LOG_ERR,
			    "%s: unexpected seq# %x for TID %u, snd_una %x\n",
			    __func__, snd_una, toep->tid, tp->snd_una);
		}
#endif

		if (tp->snd_una != snd_una) {
			tp->snd_una = snd_una;
			tp->ts_recent_age = tcp_ts_getticks();
		}
	}

	so = inp->inp_socket;
	txsd = &toep->txsd[toep->txsd_cidx];
	plen = 0;
	while (credits) {
		KASSERT(credits >= txsd->tx_credits,
		    ("%s: too many (or partial) credits", __func__));
		credits -= txsd->tx_credits;
		toep->tx_credits += txsd->tx_credits;
		plen += txsd->plen;
		txsd++;
		toep->txsd_avail++;
		KASSERT(toep->txsd_avail <= toep->txsd_total,
		    ("%s: txsd avail > total", __func__));
		if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) {
			txsd = &toep->txsd[0];
			toep->txsd_cidx = 0;
		}
	}

	if (toep->tx_credits == toep->tx_total) {
		toep->tx_nocompl = 0;
		toep->plen_nocompl = 0;
	}

	if (toep->flags & TPF_TX_SUSPENDED &&
	    toep->tx_credits >= toep->tx_total / 4) {
		toep->flags &= ~TPF_TX_SUSPENDED;
		if (toep->ulp_mode == ULP_MODE_ISCSI)
			t4_push_pdus(sc, toep, plen);
		else
			t4_push_frames(sc, toep, plen);
	} else if (plen > 0) {
		struct sockbuf *sb = &so->so_snd;
		int sbu;

		SOCKBUF_LOCK(sb);
		sbu = sbused(sb);
		if (toep->ulp_mode == ULP_MODE_ISCSI) {

			if (__predict_false(sbu > 0)) {
				/*
				 * The data transmitted before the tid's ULP
				 * mode changed to ISCSI is still in so_snd.
				 * Incoming credits should account for so_snd
				 * first.
				 */
				sbdrop_locked(sb, min(sbu, plen));
				plen -= min(sbu, plen);
			}
			sowwakeup_locked(so);	/* unlocks so_snd */
			rqdrop_locked(&toep->ulp_pdu_reclaimq, plen);
		} else {
			sbdrop_locked(sb, plen);
			sowwakeup_locked(so);	/* unlocks so_snd */
		}
		SOCKBUF_UNLOCK_ASSERT(sb);
	}

	INP_WUNLOCK(inp);

	return (0);
}
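/*
 * What's left is TCB manipulation.  t4_set_tcb_field below is the
 * generic way to update a field of a connection's TCB (the DDP code
 * elsewhere in t4_tom is one likely caller); it sets V_NO_REPLY(1), so
 * do_set_tcb_rpl expects CPL_SET_TCB_RPL only for filter TCBs, which it
 * forwards to t4_filter_rpl.
 */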
static int
do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_SET_TCB_RPL,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (is_ftid(sc, tid))
		return (t4_filter_rpl(iq, rss, m));	/* TCB is a filter */

	/*
	 * TOM and/or other ULPs don't request replies for CPL_SET_TCB or
	 * CPL_SET_TCB_FIELD requests.  This can easily change and when it does
	 * the dispatch code will go here.
	 */
#ifdef INVARIANTS
	panic("%s: Unexpected CPL_SET_TCB_RPL for tid %u on iq %p", __func__,
	    tid, iq);
#else
	log(LOG_ERR, "%s: Unexpected CPL_SET_TCB_RPL for tid %u on iq %p\n",
	    __func__, tid, iq);
#endif

	return (0);
}

void
t4_set_tcb_field(struct adapter *sc, struct toepcb *toep, int ctrl,
    uint16_t word, uint64_t mask, uint64_t val)
{
	struct wrqe *wr;
	struct cpl_set_tcb_field *req;

	wr = alloc_wrqe(sizeof(*req), ctrl ? toep->ctrlq : toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	t4_wrq_tx(sc, wr);
}

void
t4_init_cpl_io_handlers(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_PEER_CLOSE, do_peer_close);
	t4_register_cpl_handler(sc, CPL_CLOSE_CON_RPL, do_close_con_rpl);
	t4_register_cpl_handler(sc, CPL_ABORT_REQ_RSS, do_abort_req);
	t4_register_cpl_handler(sc, CPL_ABORT_RPL_RSS, do_abort_rpl);
	t4_register_cpl_handler(sc, CPL_RX_DATA, do_rx_data);
	t4_register_cpl_handler(sc, CPL_FW4_ACK, do_fw4_ack);
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, do_set_tcb_rpl);
}

void
t4_uninit_cpl_io_handlers(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
}
#endif