/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sglist.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/tcp_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/toecore.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

VNET_DECLARE(int, tcp_do_autosndbuf);
#define V_tcp_do_autosndbuf VNET(tcp_do_autosndbuf)
VNET_DECLARE(int, tcp_autosndbuf_inc);
#define V_tcp_autosndbuf_inc VNET(tcp_autosndbuf_inc)
VNET_DECLARE(int, tcp_autosndbuf_max);
#define V_tcp_autosndbuf_max VNET(tcp_autosndbuf_max)
VNET_DECLARE(int, tcp_do_autorcvbuf);
#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
VNET_DECLARE(int, tcp_autorcvbuf_inc);
#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
VNET_DECLARE(int, tcp_autorcvbuf_max);
#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)

void
send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
{
	struct wrqe *wr;
	struct fw_flowc_wr *flowc;
	unsigned int nparams = ftxp ? 8 : 6, flowclen;
	struct port_info *pi = toep->port;
	struct adapter *sc = pi->adapter;
	unsigned int pfvf = G_FW_VIID_PFN(pi->viid) << S_FW_VIID_PFN;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT),
	    ("%s: flowc for tid %u sent already", __func__, toep->tid));

	CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);

	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);

	wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	flowc = wrtod(wr);
	memset(flowc, 0, wr->wr_len);

	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(nparams));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
	    V_FW_WR_FLOWID(toep->tid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htobe32(pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htobe32(pi->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htobe32(pi->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htobe32(toep->ofld_rxq->iq.abs_id);
	if (ftxp) {
		uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf);

		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
		flowc->mnemval[4].val = htobe32(ftxp->snd_nxt);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
		flowc->mnemval[5].val = htobe32(ftxp->rcv_nxt);
		flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[6].val = htobe32(sndbuf);
		flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[7].val = htobe32(ftxp->mss);
	} else {
		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[4].val = htobe32(512);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[5].val = htobe32(512);
	}

	txsd->tx_credits = howmany(flowclen, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	toep->flags |= TPF_FLOWC_WR_SENT;
	t4_wrq_tx(sc, wr);
}

void
send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt)
{
	struct wrqe *wr;
	struct cpl_abort_req *req;
	int tid = toep->tid;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);	/* don't use if INP_DROPPED */

	INP_WLOCK_ASSERT(inp);

	CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s",
	    __func__, toep->tid,
	    inp->inp_flags & INP_DROPPED ? "inp dropped" :
	    tcpstates[tp->t_state],
	    toep->flags, inp->inp_flags,
	    toep->flags & TPF_ABORT_SHUTDOWN ?
	    " (abort already in progress)" : "");

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		return;	/* abort already in progress */

	toep->flags |= TPF_ABORT_SHUTDOWN;

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %d.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid);
	if (inp->inp_flags & INP_DROPPED)
		req->rsvd0 = htobe32(snd_nxt);
	else
		req->rsvd0 = htobe32(tp->snd_nxt);
	req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	/*
	 * XXX: What's the correct way to tell that the inp hasn't been detached
	 * from its socket?  Should I even be flushing the snd buffer here?
	 */
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)	/* because I'm not sure.  See comment above */
			sbflush(&so->so_snd);
	}

	t4_l2t_send(sc, wr, toep->l2te);
}

/*
 * Called when a connection is established to translate the TCP options
 * reported by HW to FreeBSD's native format.
 */
static void
assign_rxopt(struct tcpcb *tp, unsigned int opt)
{
	struct toepcb *toep = tp->t_toe;
	struct adapter *sc = td_adapter(toep->td);

	INP_LOCK_ASSERT(tp->t_inpcb);

	tp->t_maxseg = tp->t_maxopd = sc->params.mtus[G_TCPOPT_MSS(opt)] - 40;

	if (G_TCPOPT_TSTAMP(opt)) {
		tp->t_flags |= TF_RCVD_TSTMP;	/* timestamps ok */
		tp->ts_recent = 0;		/* hmmm */
		tp->ts_recent_age = tcp_ts_getticks();
		tp->t_maxseg -= TCPOLEN_TSTAMP_APPA;
	}

	if (G_TCPOPT_SACK(opt))
		tp->t_flags |= TF_SACK_PERMIT;	/* should already be set */
	else
		tp->t_flags &= ~TF_SACK_PERMIT;	/* sack disallowed by peer */

	if (G_TCPOPT_WSCALE_OK(opt))
		tp->t_flags |= TF_RCVD_SCALE;

	/* Doing window scaling? */
	if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
	    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
		tp->rcv_scale = tp->request_r_scale;
		tp->snd_scale = G_TCPOPT_SND_WSCALE(opt);
	}
}

/*
 * Completes some final bits of initialization for just established connections
 * and changes their state to TCPS_ESTABLISHED.
 *
 * The ISNs are from after the exchange of SYNs, i.e., the true ISN + 1.
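 * For example, the hardware reports snd_isn as iss + 1, so the code below
 * subtracts 1 to recover the true iss before seeding snd_una/snd_nxt/snd_max.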
 */
void
make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn,
    uint16_t opt)
{
	struct inpcb *inp = toep->inp;
	struct socket *so = inp->inp_socket;
	struct tcpcb *tp = intotcpcb(inp);
	long bufsize;
	uint32_t iss = be32toh(snd_isn) - 1;	/* true ISS */
	uint32_t irs = be32toh(rcv_isn) - 1;	/* true IRS */
	uint16_t tcpopt = be16toh(opt);
	struct flowc_tx_params ftxp;

	INP_WLOCK_ASSERT(inp);
	KASSERT(tp->t_state == TCPS_SYN_SENT ||
	    tp->t_state == TCPS_SYN_RECEIVED,
	    ("%s: TCP state %s", __func__, tcpstates[tp->t_state]));

	CTR4(KTR_CXGBE, "%s: tid %d, toep %p, inp %p",
	    __func__, toep->tid, toep, inp);

	tp->t_state = TCPS_ESTABLISHED;
	tp->t_starttime = ticks;
	TCPSTAT_INC(tcps_connects);

	tp->irs = irs;
	tcp_rcvseqinit(tp);
	tp->rcv_wnd = toep->rx_credits << 10;
	tp->rcv_adv += tp->rcv_wnd;
	tp->last_ack_sent = tp->rcv_nxt;

	/*
	 * If we were unable to send all rx credits via opt0, save the remainder
	 * in rx_credits so that they can be handed over with the next credit
	 * update.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	bufsize = select_rcv_wnd(so);
	SOCKBUF_UNLOCK(&so->so_rcv);
	toep->rx_credits = bufsize - tp->rcv_wnd;

	tp->iss = iss;
	tcp_sendseqinit(tp);
	tp->snd_una = iss + 1;
	tp->snd_nxt = iss + 1;
	tp->snd_max = iss + 1;

	assign_rxopt(tp, tcpopt);

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf)
		bufsize = V_tcp_autosndbuf_max;
	else
		bufsize = sbspace(&so->so_snd);
	SOCKBUF_UNLOCK(&so->so_snd);

	ftxp.snd_nxt = tp->snd_nxt;
	ftxp.rcv_nxt = tp->rcv_nxt;
	ftxp.snd_space = bufsize;
	ftxp.mss = tp->t_maxseg;
	send_flowc_wr(toep, &ftxp);

	soisconnected(so);
}

static int
send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits)
{
	struct wrqe *wr;
	struct cpl_rx_data_ack *req;
	uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	KASSERT(credits >= 0, ("%s: %d credits", __func__, credits));

	wr = alloc_wrqe(sizeof(*req), toep->ctrlq);
	if (wr == NULL)
		return (0);
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid);
	req->credit_dack = htobe32(dack | V_RX_CREDITS(credits));

	t4_wrq_tx(sc, wr);
	return (credits);
}

void
t4_rcvd(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_rcv;
	struct toepcb *toep = tp->t_toe;
	int credits;

	INP_WLOCK_ASSERT(inp);

	SOCKBUF_LOCK(sb);
	KASSERT(toep->sb_cc >= sb->sb_cc,
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sb->sb_cc, toep->sb_cc));
	toep->rx_credits += toep->sb_cc - sb->sb_cc;
	toep->sb_cc = sb->sb_cc;
	credits = toep->rx_credits;
	SOCKBUF_UNLOCK(sb);

	if (credits > 0 &&
	    (credits + 16384 >= tp->rcv_wnd || credits >= 15 * 1024)) {

		credits = send_rx_credits(sc, toep, credits);
		SOCKBUF_LOCK(sb);
		toep->rx_credits -= credits;
		SOCKBUF_UNLOCK(sb);
		tp->rcv_wnd += credits;
		tp->rcv_adv += credits;
	}
}

/*
 * Close a connection by sending a CPL_CLOSE_CON_REQ message.
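 * The chip transmits the FIN; the peer's acknowledgement of it arrives later
 * as a CPL_CLOSE_CON_RPL and is handled in do_close_con_rpl().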
 */
static int
close_conn(struct adapter *sc, struct toepcb *toep)
{
	struct wrqe *wr;
	struct cpl_close_con_req *req;
	unsigned int tid = toep->tid;

	CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid,
	    toep->flags & TPF_FIN_SENT ? ", IGNORED" : "");

	if (toep->flags & TPF_FIN_SENT)
		return (0);

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) |
	    V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr)));
	req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) |
	    V_FW_WR_FLOWID(tid));
	req->wr.wr_lo = cpu_to_be64(0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	toep->flags |= TPF_FIN_SENT;
	toep->flags &= ~TPF_SEND_FIN;
	t4_l2t_send(sc, wr, toep->l2te);

	return (0);
}

#define MAX_OFLD_TX_CREDITS	(SGE_MAX_WR_LEN / 16)
#define MIN_OFLD_TX_CREDITS	(howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16))

/* Maximum amount of immediate data we could stuff in a WR */
static inline int
max_imm_payload(int tx_credits)
{
	const int n = 2;	/* Use only up to 2 desc for imm. data WR */

	KASSERT(tx_credits >= 0 &&
	    tx_credits <= MAX_OFLD_TX_CREDITS,
	    ("%s: %d credits", __func__, tx_credits));

	if (tx_credits < MIN_OFLD_TX_CREDITS)
		return (0);

	if (tx_credits >= (n * EQ_ESIZE) / 16)
		return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr));
	else
		return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr));
}

/* Maximum number of SGL entries we could stuff in a WR */
static inline int
max_dsgl_nsegs(int tx_credits)
{
	int nseg = 1;	/* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */
	int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS;

	KASSERT(tx_credits >= 0 &&
	    tx_credits <= MAX_OFLD_TX_CREDITS,
	    ("%s: %d credits", __func__, tx_credits));

	if (tx_credits < MIN_OFLD_TX_CREDITS)
		return (0);

	nseg += 2 * (sge_pair_credits * 16 / 24);
	if ((sge_pair_credits * 16) % 24 == 16)
		nseg++;

	return (nseg);
}

static inline void
write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen,
    unsigned int plen, uint8_t credits, int shove)
{
	struct fw_ofld_tx_data_wr *txwr = dst;

	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) |
	    V_FW_WR_IMMDLEN(immdlen));
	txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) |
	    V_FW_WR_LEN16(credits));
	txwr->lsodisable_to_proxy =
	    htobe32(V_FW_OFLD_TX_DATA_WR_ULPMODE(toep->ulp_mode) |
		V_FW_OFLD_TX_DATA_WR_URGENT(0) |	/* XXX */
		V_FW_OFLD_TX_DATA_WR_SHOVE(shove));
	txwr->plen = htobe32(plen);
}

/*
 * Generate a DSGL from a starting mbuf.  The total number of segments and the
 * maximum segments in any one mbuf are provided.
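 * The first segment goes into the ulptx_sgl header itself (len0/addr0); the
 * rest are packed two per ulptx_sge_pair, hence the i / 2 and i & 1 indexing
 * in the loop below.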
 */
static void
write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n)
{
	struct mbuf *m;
	struct ulptx_sgl *usgl = dst;
	int i, j, rc;
	struct sglist sg;
	struct sglist_seg segs[n];

	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

	sglist_init(&sg, n, segs);
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));

	i = -1;
	for (m = start; m != stop; m = m->m_next) {
		rc = sglist_append(&sg, mtod(m, void *), m->m_len);
		if (__predict_false(rc != 0))
			panic("%s: sglist_append %d", __func__, rc);

		for (j = 0; j < sg.sg_nseg; i++, j++) {
			if (i < 0) {
				usgl->len0 = htobe32(segs[j].ss_len);
				usgl->addr0 = htobe64(segs[j].ss_paddr);
			} else {
				usgl->sge[i / 2].len[i & 1] =
				    htobe32(segs[j].ss_len);
				usgl->sge[i / 2].addr[i & 1] =
				    htobe64(segs[j].ss_paddr);
			}
#ifdef INVARIANTS
			nsegs--;
#endif
		}
		sglist_reset(&sg);
	}
	if (i & 1)
		usgl->sge[i / 2].len[1] = htobe32(0);
	KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p",
	    __func__, nsegs, start, stop));
}

/*
 * Max number of SGL entries an offload tx work request can have.  This is 41
 * (1 + 40) for a full 512B work request.
 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40)
 */
#define OFLD_SGL_LEN (41)

/*
 * Send data and/or a FIN to the peer.
 *
 * The socket's so_snd buffer consists of a stream of data starting with sb_mb
 * and linked together with m_next.  sb_sndptr, if set, is the last mbuf that
 * was transmitted.
 *
 * drop indicates the number of bytes that should be dropped from the head of
 * the send buffer.  It is an optimization that lets do_fw4_ack avoid creating
 * contention on the send buffer lock (before this change it used to do
 * sowwakeup and then t4_push_frames right after that when recovering from tx
 * stalls).  When drop is set this function MUST drop the bytes and wake up any
 * writers.
 */
static void
t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m, *sb_sndptr;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_snd;
	int tx_credits, shove, compl, space, sowwakeup;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
	    toep->ulp_mode == ULP_MODE_TCPDDP ||
	    toep->ulp_mode == ULP_MODE_RDMA,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
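	 * (In practice do_fw4_ack clears TPF_TX_SUSPENDED once enough tx
	 * credits have been returned and then calls t4_push_frames again.)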
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	do {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		SOCKBUF_LOCK(sb);
		sowwakeup = drop;
		if (drop) {
			sbdrop_locked(sb, drop);
			drop = 0;
		}
		sb_sndptr = sb->sb_sndptr;
		sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb;
		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/* This mbuf sent us _over_ the nsegs limit, back out */
			if (plen > max_imm && nsegs > max_nsegs) {
				nsegs -= n;
				plen -= m->m_len;
				if (plen == 0) {
					/* Too few credits */
					toep->flags |= TPF_TX_SUSPENDED;
					if (sowwakeup)
						sowwakeup_locked(so);
					else
						SOCKBUF_UNLOCK(sb);
					SOCKBUF_UNLOCK_ASSERT(sb);
					return;
				}
				break;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
			sb_sndptr = m;	/* new sb->sb_sndptr if all goes well */

			/* This mbuf put us right at the max_nsegs limit */
			if (plen > max_imm && nsegs == max_nsegs) {
				m = m->m_next;
				break;
			}
		}

		shove = m == NULL && !(tp->t_flags & TF_MORETOCOME);
		space = sbspace(sb);

		if (space <= sb->sb_hiwat * 3 / 8 &&
		    toep->plen_nocompl + plen >= sb->sb_hiwat / 4)
			compl = 1;
		else
			compl = 0;

		if (sb->sb_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf &&
		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
		    space < sb->sb_hiwat / 8) {
			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max);

			if (!sbreserve_locked(sb, newsize, so, NULL))
				sb->sb_flags &= ~SB_AUTOSIZE;
			else
				sowwakeup = 1;	/* room available */
		}
		if (sowwakeup)
			sowwakeup_locked(so);
		else
			SOCKBUF_UNLOCK(sb);
		SOCKBUF_UNLOCK_ASSERT(sb);

		/* nothing to send */
		if (plen == 0) {
			KASSERT(m == NULL,
			    ("%s: nothing to send, but m != NULL", __func__));
			break;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		if (plen <= max_imm) {

			/* Immediate data tx */

			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
			    toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, plen, credits, shove);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
			nsegs = 0;
		} else {
			int wr_len;

			/* DSGL tx */

			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, plen, credits, shove);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));

		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4)
			compl = 1;

		if (compl) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}

		tp->snd_nxt += plen;
		tp->snd_max += plen;

		SOCKBUF_LOCK(sb);
		KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__));
		sb->sb_sndptr = sb_sndptr;
		SOCKBUF_UNLOCK(sb);

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	} while (m != NULL);

	/* Send a FIN if requested, but only if there's no more data to send */
	if (m == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}

int
t4_tod_output(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	t4_push_frames(sc, toep, 0);

	return (0);
}

int
t4_send_fin(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	toep->flags |= TPF_SEND_FIN;
	if (tp->t_state >= TCPS_ESTABLISHED)
		t4_push_frames(sc, toep, 0);

	return (0);
}

int
t4_send_rst(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#if defined(INVARIANTS)
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	/* hmmmm */
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc for tid %u [%s] not sent already",
	    __func__, toep->tid, tcpstates[tp->t_state]));

	send_reset(sc, toep, 0);
	return (0);
}

/*
 * Peer has sent us a FIN.
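 * Advance rcv_nxt past the FIN, append any data the chip had placed via DDP
 * to the socket buffer, and step the connection through the usual FIN-receipt
 * state transitions.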
 */
static int
do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_peer_close *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so;
	struct sockbuf *sb;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_PEER_CLOSE,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (__predict_false(toep->flags & TPF_SYNQE)) {
#ifdef INVARIANTS
		struct synq_entry *synqe = (void *)toep;

		INP_WLOCK(synqe->lctx->inp);
		if (synqe->flags & TPF_SYNQE_HAS_L2TE) {
			KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN,
			    ("%s: listen socket closed but tid %u not aborted.",
			    __func__, tid));
		} else {
			/*
			 * do_pass_accept_req is still running and will
			 * eventually take care of this tid.
			 */
		}
		INP_WUNLOCK(synqe->lctx->inp);
#endif
		CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
		    toep, toep->flags);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_INFO_WLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__,
	    tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	tp->rcv_nxt++;	/* FIN */

	so = inp->inp_socket;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	if (__predict_false(toep->ddp_flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) {
		m = get_ddp_mbuf(be32toh(cpl->rcv_nxt) - tp->rcv_nxt);
		tp->rcv_nxt = be32toh(cpl->rcv_nxt);
		toep->ddp_flags &= ~(DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE);

		KASSERT(toep->sb_cc >= sb->sb_cc,
		    ("%s: sb %p has more data (%d) than last time (%d).",
		    __func__, sb, sb->sb_cc, toep->sb_cc));
		toep->rx_credits += toep->sb_cc - sb->sb_cc;
#ifdef USE_DDP_RX_FLOW_CONTROL
		toep->rx_credits -= m->m_len;	/* adjust for F_RX_FC_DDP */
#endif
		sbappendstream_locked(sb, m);
		toep->sb_cc = sb->sb_cc;
	}
	socantrcvmore_locked(so);	/* unlocks the sockbuf */

	if (toep->ulp_mode != ULP_MODE_RDMA) {
		KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt),
		    ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt,
		    be32toh(cpl->rcv_nxt)));
	}

	switch (tp->t_state) {
	case TCPS_SYN_RECEIVED:
		tp->t_starttime = ticks;
		/* FALLTHROUGH */

	case TCPS_ESTABLISHED:
		tp->t_state = TCPS_CLOSE_WAIT;
		break;

	case TCPS_FIN_WAIT_1:
		tp->t_state = TCPS_CLOSING;
		break;

	case TCPS_FIN_WAIT_2:
		tcp_twstart(tp);
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the inp */
		INP_INFO_WUNLOCK(&V_tcbinfo);

		INP_WLOCK(inp);
		final_cpl_received(toep);
		return (0);

	default:
		log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n",
		    __func__, tid, tp->t_state);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_tcbinfo);
	return (0);
}

/*
 * Peer has ACK'd our FIN.
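 * cpl->snd_nxt from the chip counts our FIN, so snd_una is set to
 * be32toh(cpl->snd_nxt) - 1 below to exclude the FIN itself.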
 */
static int
do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_CLOSE_CON_RPL,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_INFO_WLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	so = inp->inp_socket;
	tp->snd_una = be32toh(cpl->snd_nxt) - 1;	/* exclude FIN */

	switch (tp->t_state) {
	case TCPS_CLOSING:	/* see TCPS_FIN_WAIT_2 in do_peer_close too */
		tcp_twstart(tp);
release:
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the inp */
		INP_INFO_WUNLOCK(&V_tcbinfo);

		INP_WLOCK(inp);
		final_cpl_received(toep);	/* no more CPLs expected */

		return (0);
	case TCPS_LAST_ACK:
		if (tcp_close(tp))
			INP_WUNLOCK(inp);
		goto release;

	case TCPS_FIN_WAIT_1:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			soisdisconnected(so);
		tp->t_state = TCPS_FIN_WAIT_2;
		break;

	default:
		log(LOG_ERR,
		    "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n",
		    __func__, tid, tcpstates[tp->t_state]);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_tcbinfo);
	return (0);
}

void
send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid,
    int rst_status)
{
	struct wrqe *wr;
	struct cpl_abort_rpl *cpl;

	wr = alloc_wrqe(sizeof(*cpl), ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	cpl = wrtod(wr);

	INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid);
	cpl->cmd = rst_status;

	t4_wrq_tx(sc, wr);
}

static int
abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN:
	case CPL_ERR_CONN_RESET:
		return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET);
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return (ETIMEDOUT);
	default:
		return (EIO);
	}
}

/*
 * TCP RST from the peer, timeout, or some other such critical error.
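 * The reason for the abort is in cpl->status; abort_status_to_errno() above
 * maps it to an errno that is set on the socket before the connection is
 * torn down.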
 */
static int
do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct sge_wrq *ofld_txq = toep->ofld_txq;
	struct inpcb *inp;
	struct tcpcb *tp;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_REQ_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (toep->flags & TPF_SYNQE)
		return (do_abort_req_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	if (negative_advice(cpl->status)) {
		CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)",
		    __func__, cpl->status, tid, toep->flags);
		return (0);	/* Ignore negative advice */
	}

	inp = toep->inp;
	INP_INFO_WLOCK(&V_tcbinfo);	/* for tcp_close */
	INP_WLOCK(inp);

	tp = intotcpcb(inp);

	CTR6(KTR_CXGBE,
	    "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags,
	    inp->inp_flags, cpl->status);

	/*
	 * If we'd initiated an abort earlier, the reply to it is responsible
	 * for cleaning up resources.  Otherwise we tear everything down right
	 * here right now.  We owe the T4 a CPL_ABORT_RPL no matter what.
	 */
	if (toep->flags & TPF_ABORT_SHUTDOWN) {
		INP_WUNLOCK(inp);
		goto done;
	}
	toep->flags |= TPF_ABORT_SHUTDOWN;

	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)
			so_error_set(so, abort_status_to_errno(tp,
			    cpl->status));
		tp = tcp_close(tp);
		if (tp == NULL)
			INP_WLOCK(inp);	/* re-acquire */
	}

	final_cpl_received(toep);
done:
	INP_INFO_WUNLOCK(&V_tcbinfo);
	send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST);
	return (0);
}

/*
 * Reply to the CPL_ABORT_REQ (send_reset)
 */
static int
do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_RPL_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (toep->flags & TPF_SYNQE)
		return (do_abort_rpl_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d",
	    __func__, tid, toep, inp, cpl->status);

	KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
	    ("%s: wasn't expecting abort reply", __func__));

	INP_WLOCK(inp);
	final_cpl_received(toep);

	return (0);
}

static int
do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_data *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	int len;
	uint32_t ddp_placed = 0;

	if (__predict_false(toep->flags & TPF_SYNQE)) {
#ifdef INVARIANTS
		struct synq_entry *synqe = (void *)toep;

		INP_WLOCK(synqe->lctx->inp);
		if (synqe->flags & TPF_SYNQE_HAS_L2TE) {
			KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN,
			    ("%s: listen socket closed but tid %u not aborted.",
			    __func__, tid));
		} else {
			/*
			 * do_pass_accept_req is still running and will
			 * eventually take care of this tid.
			 */
		}
		INP_WUNLOCK(synqe->lctx->inp);
#endif
		CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
		    toep, toep->flags);
		m_freem(m);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
	len = m->m_pkthdr.len;

	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	tp = intotcpcb(inp);

	if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq)))
		ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt;

	tp->rcv_nxt += len;
	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
	tp->rcv_wnd -= len;
	tp->t_rcvtime = ticks;

	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
		CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
		    __func__, tid, len);
		m_freem(m);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);

		INP_INFO_WLOCK(&V_tcbinfo);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp)
			INP_WUNLOCK(inp);
		INP_INFO_WUNLOCK(&V_tcbinfo);

		return (0);
	}

	/* receive buffer autosize */
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    len > (sbspace(sb) / 8 * 7)) {
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(sb, newsize, so, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
		else
			toep->rx_credits += newsize - hiwat;
	}

	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		int changed = !(toep->ddp_flags & DDP_ON) ^ cpl->ddp_off;

		if (changed) {
			if (toep->ddp_flags & DDP_SC_REQ)
				toep->ddp_flags ^= DDP_ON | DDP_SC_REQ;
			else {
				KASSERT(cpl->ddp_off == 1,
				    ("%s: DDP switched on by itself.",
				    __func__));

				/* Fell out of DDP mode */
				toep->ddp_flags &= ~(DDP_ON | DDP_BUF0_ACTIVE |
				    DDP_BUF1_ACTIVE);

				if (ddp_placed)
					insert_ddp_data(toep, ddp_placed);
			}
		}

		if ((toep->ddp_flags & DDP_OK) == 0 &&
		    time_uptime >= toep->ddp_disabled + DDP_RETRY_WAIT) {
			toep->ddp_score = DDP_LOW_SCORE;
			toep->ddp_flags |= DDP_OK;
			CTR3(KTR_CXGBE, "%s: tid %u DDP_OK @ %u",
			    __func__, tid, time_uptime);
		}

		if (toep->ddp_flags & DDP_ON) {

			/*
			 * CPL_RX_DATA with DDP on can only be an indicate.  Ask
			 * soreceive to post a buffer or disable DDP.  The
			 * payload that arrived in this indicate is appended to
			 * the socket buffer as usual.
			 */

#if 0
			CTR5(KTR_CXGBE,
			    "%s: tid %u (0x%x) DDP indicate (seq 0x%x, len %d)",
			    __func__, tid, toep->flags, be32toh(cpl->seq), len);
#endif
			sb->sb_flags |= SB_DDP_INDICATE;
		} else if ((toep->ddp_flags & (DDP_OK|DDP_SC_REQ)) == DDP_OK &&
		    tp->rcv_wnd > DDP_RSVD_WIN && len >= sc->tt.ddp_thres) {

			/*
			 * DDP allowed but isn't on (and a request to switch it
			 * on isn't pending either), and conditions are ripe for
			 * it to work.  Switch it on.
			 */

			enable_ddp(sc, toep);
		}
	}

	KASSERT(toep->sb_cc >= sb->sb_cc,
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sb->sb_cc, toep->sb_cc));
	toep->rx_credits += toep->sb_cc - sb->sb_cc;
	sbappendstream_locked(sb, m);
	toep->sb_cc = sb->sb_cc;
	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);

	INP_WUNLOCK(inp);
	return (0);
}

#define S_CPL_FW4_ACK_OPCODE    24
#define M_CPL_FW4_ACK_OPCODE    0xff
#define V_CPL_FW4_ACK_OPCODE(x) ((x) << S_CPL_FW4_ACK_OPCODE)
#define G_CPL_FW4_ACK_OPCODE(x) \
    (((x) >> S_CPL_FW4_ACK_OPCODE) & M_CPL_FW4_ACK_OPCODE)

#define S_CPL_FW4_ACK_FLOWID    0
#define M_CPL_FW4_ACK_FLOWID    0xffffff
#define V_CPL_FW4_ACK_FLOWID(x) ((x) << S_CPL_FW4_ACK_FLOWID)
#define G_CPL_FW4_ACK_FLOWID(x) \
    (((x) >> S_CPL_FW4_ACK_FLOWID) & M_CPL_FW4_ACK_FLOWID)

#define S_CPL_FW4_ACK_CR        24
#define M_CPL_FW4_ACK_CR        0xff
#define V_CPL_FW4_ACK_CR(x)     ((x) << S_CPL_FW4_ACK_CR)
#define G_CPL_FW4_ACK_CR(x)     (((x) >> S_CPL_FW4_ACK_CR) & M_CPL_FW4_ACK_CR)

#define S_CPL_FW4_ACK_SEQVAL    0
#define M_CPL_FW4_ACK_SEQVAL    0x1
#define V_CPL_FW4_ACK_SEQVAL(x) ((x) << S_CPL_FW4_ACK_SEQVAL)
#define G_CPL_FW4_ACK_SEQVAL(x) \
    (((x) >> S_CPL_FW4_ACK_SEQVAL) & M_CPL_FW4_ACK_SEQVAL)
#define F_CPL_FW4_ACK_SEQVAL    V_CPL_FW4_ACK_SEQVAL(1U)

static int
do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp;
	struct tcpcb *tp;
	struct socket *so;
	uint8_t credits = cpl->credits;
	struct ofld_tx_sdesc *txsd;
	int plen;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	/*
	 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and
	 * now this comes back carrying the credits for the flowc.
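	 * There is no send buffer to update for a synq entry, so the credits
	 * are simply ignored and the handler returns immediately below.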
	 */
	if (__predict_false(toep->flags & TPF_SYNQE)) {
		KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
		    ("%s: credits for a synq entry %p", __func__, toep));
		return (0);
	}

	inp = toep->inp;

	KASSERT(opcode == CPL_FW4_ACK,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_WLOCK(inp);

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) {
		INP_WUNLOCK(inp);
		return (0);
	}

	KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0,
	    ("%s: inp_flags 0x%x", __func__, inp->inp_flags));

	tp = intotcpcb(inp);

	if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) {
		tcp_seq snd_una = be32toh(cpl->snd_una);

#ifdef INVARIANTS
		if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) {
			log(LOG_ERR,
			    "%s: unexpected seq# %x for TID %u, snd_una %x\n",
			    __func__, snd_una, toep->tid, tp->snd_una);
		}
#endif

		if (tp->snd_una != snd_una) {
			tp->snd_una = snd_una;
			tp->ts_recent_age = tcp_ts_getticks();
		}
	}

	so = inp->inp_socket;
	txsd = &toep->txsd[toep->txsd_cidx];
	plen = 0;
	while (credits) {
		KASSERT(credits >= txsd->tx_credits,
		    ("%s: too many (or partial) credits", __func__));
		credits -= txsd->tx_credits;
		toep->tx_credits += txsd->tx_credits;
		plen += txsd->plen;
		txsd++;
		toep->txsd_avail++;
		KASSERT(toep->txsd_avail <= toep->txsd_total,
		    ("%s: txsd avail > total", __func__));
		if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) {
			txsd = &toep->txsd[0];
			toep->txsd_cidx = 0;
		}
	}

	if (toep->tx_credits == toep->tx_total) {
		toep->tx_nocompl = 0;
		toep->plen_nocompl = 0;
	}

	if (toep->flags & TPF_TX_SUSPENDED &&
	    toep->tx_credits >= toep->tx_total / 4) {
		toep->flags &= ~TPF_TX_SUSPENDED;
		t4_push_frames(sc, toep, plen);
	} else if (plen > 0) {
		struct sockbuf *sb = &so->so_snd;

		SOCKBUF_LOCK(sb);
		sbdrop_locked(sb, plen);
		sowwakeup_locked(so);
		SOCKBUF_UNLOCK_ASSERT(sb);
	}

	INP_WUNLOCK(inp);

	return (0);
}

static int
do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_SET_TCB_RPL,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (is_ftid(sc, tid))
		return (t4_filter_rpl(iq, rss, m));	/* TCB is a filter */

	CXGBE_UNIMPLEMENTED(__func__);
}

void
t4_set_tcb_field(struct adapter *sc, struct toepcb *toep, int ctrl,
    uint16_t word, uint64_t mask, uint64_t val)
{
	struct wrqe *wr;
	struct cpl_set_tcb_field *req;

	wr = alloc_wrqe(sizeof(*req), ctrl ? toep->ctrlq : toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	t4_wrq_tx(sc, wr);
}

void
t4_init_cpl_io_handlers(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_PEER_CLOSE, do_peer_close);
	t4_register_cpl_handler(sc, CPL_CLOSE_CON_RPL, do_close_con_rpl);
	t4_register_cpl_handler(sc, CPL_ABORT_REQ_RSS, do_abort_req);
	t4_register_cpl_handler(sc, CPL_ABORT_RPL_RSS, do_abort_rpl);
	t4_register_cpl_handler(sc, CPL_RX_DATA, do_rx_data);
	t4_register_cpl_handler(sc, CPL_FW4_ACK, do_fw4_ack);
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, do_set_tcb_rpl);
}

void
t4_uninit_cpl_io_handlers(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
}
#endif