/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sglist.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/tcp_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/toecore.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

VNET_DECLARE(int, tcp_do_autosndbuf);
#define V_tcp_do_autosndbuf VNET(tcp_do_autosndbuf)
VNET_DECLARE(int, tcp_autosndbuf_inc);
#define V_tcp_autosndbuf_inc VNET(tcp_autosndbuf_inc)
VNET_DECLARE(int, tcp_autosndbuf_max);
#define V_tcp_autosndbuf_max VNET(tcp_autosndbuf_max)
VNET_DECLARE(int, tcp_do_autorcvbuf);
#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
VNET_DECLARE(int, tcp_autorcvbuf_inc);
#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
VNET_DECLARE(int, tcp_autorcvbuf_max);
#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)

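/*
 * Informal note: the FLOWC work request sets up the connection's flow
 * context in the firmware.  It carries the PF/VF, tx channel, and rx queue
 * for the tid, and (when ftxp is provided) the initial sequence numbers,
 * send buffer size, and MSS.  Tx credits are consumed in 16-byte units,
 * which is what the howmany(flowclen, 16) accounting below reflects.
 */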
void
send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
{
	struct wrqe *wr;
	struct fw_flowc_wr *flowc;
	unsigned int nparams = ftxp ? 8 : 4, flowclen;
	struct port_info *pi = toep->port;
	struct adapter *sc = pi->adapter;
	unsigned int pfvf = G_FW_VIID_PFN(pi->viid) << S_FW_VIID_PFN;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT),
	    ("%s: flowc for tid %u sent already", __func__, toep->tid));

	CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);

	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);

	wr = alloc_wrqe(roundup(flowclen, 16), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	flowc = wrtod(wr);
	memset(flowc, 0, wr->wr_len);

	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(nparams));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
	    V_FW_WR_FLOWID(toep->tid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htobe32(pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htobe32(pi->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htobe32(pi->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htobe32(toep->ofld_rxq->iq.abs_id);
	if (ftxp) {
		uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf);

		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
		flowc->mnemval[4].val = htobe32(ftxp->snd_nxt);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
		flowc->mnemval[5].val = htobe32(ftxp->rcv_nxt);
		flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[6].val = htobe32(sndbuf);
		flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[7].val = htobe32(ftxp->mss);
	}

	txsd->tx_credits = howmany(flowclen, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	toep->flags |= TPF_FLOWC_WR_SENT;
	t4_wrq_tx(sc, wr);
}

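/*
 * Send an ABORT_REQ to the chip to tear the connection down with an RST.
 * Safe to call more than once: TPF_ABORT_SHUTDOWN is set on the first call
 * and subsequent calls are no-ops.
 */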
155 " (abort already in progress)" : ""); 156 157 if (toep->flags & TPF_ABORT_SHUTDOWN) 158 return; /* abort already in progress */ 159 160 toep->flags |= TPF_ABORT_SHUTDOWN; 161 162 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 163 ("%s: flowc_wr not sent for tid %d.", __func__, tid)); 164 165 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 166 if (wr == NULL) { 167 /* XXX */ 168 panic("%s: allocation failure.", __func__); 169 } 170 req = wrtod(wr); 171 172 INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid); 173 if (inp->inp_flags & INP_DROPPED) 174 req->rsvd0 = htobe32(snd_nxt); 175 else 176 req->rsvd0 = htobe32(tp->snd_nxt); 177 req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT); 178 req->cmd = CPL_ABORT_SEND_RST; 179 180 /* 181 * XXX: What's the correct way to tell that the inp hasn't been detached 182 * from its socket? Should I even be flushing the snd buffer here? 183 */ 184 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 185 struct socket *so = inp->inp_socket; 186 187 if (so != NULL) /* because I'm not sure. See comment above */ 188 sbflush(&so->so_snd); 189 } 190 191 t4_l2t_send(sc, wr, toep->l2te); 192 } 193 194 /* 195 * Called when a connection is established to translate the TCP options 196 * reported by HW to FreeBSD's native format. 197 */ 198 static void 199 assign_rxopt(struct tcpcb *tp, unsigned int opt) 200 { 201 struct toepcb *toep = tp->t_toe; 202 struct adapter *sc = td_adapter(toep->td); 203 204 INP_LOCK_ASSERT(tp->t_inpcb); 205 206 tp->t_maxseg = tp->t_maxopd = sc->params.mtus[G_TCPOPT_MSS(opt)] - 40; 207 208 if (G_TCPOPT_TSTAMP(opt)) { 209 tp->t_flags |= TF_RCVD_TSTMP; /* timestamps ok */ 210 tp->ts_recent = 0; /* hmmm */ 211 tp->ts_recent_age = tcp_ts_getticks(); 212 tp->t_maxseg -= TCPOLEN_TSTAMP_APPA; 213 } 214 215 if (G_TCPOPT_SACK(opt)) 216 tp->t_flags |= TF_SACK_PERMIT; /* should already be set */ 217 else 218 tp->t_flags &= ~TF_SACK_PERMIT; /* sack disallowed by peer */ 219 220 if (G_TCPOPT_WSCALE_OK(opt)) 221 tp->t_flags |= TF_RCVD_SCALE; 222 223 /* Doing window scaling? */ 224 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 225 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 226 tp->rcv_scale = tp->request_r_scale; 227 tp->snd_scale = G_TCPOPT_SND_WSCALE(opt); 228 } 229 } 230 231 /* 232 * Completes some final bits of initialization for just established connections 233 * and changes their state to TCPS_ESTABLISHED. 234 * 235 * The ISNs are from after the exchange of SYNs. i.e., the true ISN + 1. 

	if (G_TCPOPT_TSTAMP(opt)) {
		tp->t_flags |= TF_RCVD_TSTMP;	/* timestamps ok */
		tp->ts_recent = 0;		/* hmmm */
		tp->ts_recent_age = tcp_ts_getticks();
		tp->t_maxseg -= TCPOLEN_TSTAMP_APPA;
	}

	if (G_TCPOPT_SACK(opt))
		tp->t_flags |= TF_SACK_PERMIT;	/* should already be set */
	else
		tp->t_flags &= ~TF_SACK_PERMIT;	/* sack disallowed by peer */

	if (G_TCPOPT_WSCALE_OK(opt))
		tp->t_flags |= TF_RCVD_SCALE;

	/* Doing window scaling? */
	if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
	    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
		tp->rcv_scale = tp->request_r_scale;
		tp->snd_scale = G_TCPOPT_SND_WSCALE(opt);
	}
}

/*
 * Completes some final bits of initialization for just established connections
 * and changes their state to TCPS_ESTABLISHED.
 *
 * The ISNs are from after the exchange of SYNs.  i.e., the true ISN + 1.
 */
void
make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn,
    uint16_t opt)
{
	struct inpcb *inp = toep->inp;
	struct socket *so = inp->inp_socket;
	struct tcpcb *tp = intotcpcb(inp);
	long bufsize;
	uint32_t iss = be32toh(snd_isn) - 1;	/* true ISS */
	uint32_t irs = be32toh(rcv_isn) - 1;	/* true IRS */
	uint16_t tcpopt = be16toh(opt);
	struct flowc_tx_params ftxp;

	INP_WLOCK_ASSERT(inp);
	KASSERT(tp->t_state == TCPS_SYN_SENT ||
	    tp->t_state == TCPS_SYN_RECEIVED,
	    ("%s: TCP state %s", __func__, tcpstates[tp->t_state]));

	CTR4(KTR_CXGBE, "%s: tid %d, toep %p, inp %p",
	    __func__, toep->tid, toep, inp);

	tp->t_state = TCPS_ESTABLISHED;
	tp->t_starttime = ticks;
	TCPSTAT_INC(tcps_connects);

	tp->irs = irs;
	tcp_rcvseqinit(tp);
	tp->rcv_wnd = toep->rx_credits << 10;
	tp->rcv_adv += tp->rcv_wnd;
	tp->last_ack_sent = tp->rcv_nxt;

	/*
	 * If we were unable to send all rx credits via opt0, save the remainder
	 * in rx_credits so that they can be handed over with the next credit
	 * update.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	bufsize = select_rcv_wnd(so);
	SOCKBUF_UNLOCK(&so->so_rcv);
	toep->rx_credits = bufsize - tp->rcv_wnd;

	tp->iss = iss;
	tcp_sendseqinit(tp);
	tp->snd_una = iss + 1;
	tp->snd_nxt = iss + 1;
	tp->snd_max = iss + 1;

	assign_rxopt(tp, tcpopt);

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf)
		bufsize = V_tcp_autosndbuf_max;
	else
		bufsize = sbspace(&so->so_snd);
	SOCKBUF_UNLOCK(&so->so_snd);

	ftxp.snd_nxt = tp->snd_nxt;
	ftxp.rcv_nxt = tp->rcv_nxt;
	ftxp.snd_space = bufsize;
	ftxp.mss = tp->t_maxseg;
	send_flowc_wr(toep, &ftxp);

	soisconnected(so);
}

static int
send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits)
{
	struct wrqe *wr;
	struct cpl_rx_data_ack *req;
	uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	KASSERT(credits >= 0, ("%s: %d credits", __func__, credits));

	wr = alloc_wrqe(sizeof(*req), toep->ctrlq);
	if (wr == NULL)
		return (0);
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid);
	req->credit_dack = htobe32(dack | V_RX_CREDITS(credits));

	t4_wrq_tx(sc, wr);
	return (credits);
}

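/*
 * Called via the toedev (tod_rcvd) when the application has read data out of
 * the socket buffer.  The space freed up becomes rx credits, which are
 * returned to the chip (reopening the rx window) once enough of them have
 * accumulated to be worth a CPL_RX_DATA_ACK.
 */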
void
t4_rcvd(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_rcv;
	struct toepcb *toep = tp->t_toe;
	int credits;

	INP_WLOCK_ASSERT(inp);

	SOCKBUF_LOCK(sb);
	KASSERT(toep->sb_cc >= sb->sb_cc,
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sb->sb_cc, toep->sb_cc));
	toep->rx_credits += toep->sb_cc - sb->sb_cc;
	toep->sb_cc = sb->sb_cc;
	credits = toep->rx_credits;
	SOCKBUF_UNLOCK(sb);

	if (credits > 0 &&
	    (credits + 16384 >= tp->rcv_wnd || credits >= 15 * 1024)) {

		credits = send_rx_credits(sc, toep, credits);
		SOCKBUF_LOCK(sb);
		toep->rx_credits -= credits;
		SOCKBUF_UNLOCK(sb);
		tp->rcv_wnd += credits;
		tp->rcv_adv += credits;
	}
}

/*
 * Close a connection by sending a CPL_CLOSE_CON_REQ message.
 */
static int
close_conn(struct adapter *sc, struct toepcb *toep)
{
	struct wrqe *wr;
	struct cpl_close_con_req *req;
	unsigned int tid = toep->tid;

	CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid,
	    toep->flags & TPF_FIN_SENT ? ", IGNORED" : "");

	if (toep->flags & TPF_FIN_SENT)
		return (0);

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) |
	    V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr)));
	req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) |
	    V_FW_WR_FLOWID(tid));
	req->wr.wr_lo = cpu_to_be64(0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	toep->flags |= TPF_FIN_SENT;
	toep->flags &= ~TPF_SEND_FIN;
	t4_l2t_send(sc, wr, toep->l2te);

	return (0);
}

#define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
#define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16))

/* Maximum amount of immediate data we could stuff in a WR */
static inline int
max_imm_payload(int tx_credits)
{
	const int n = 2;	/* Use only up to 2 desc for imm. data WR */

	KASSERT(tx_credits >= 0 &&
	    tx_credits <= MAX_OFLD_TX_CREDITS,
	    ("%s: %d credits", __func__, tx_credits));

	if (tx_credits < MIN_OFLD_TX_CREDITS)
		return (0);

	if (tx_credits >= (n * EQ_ESIZE) / 16)
		return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr));
	else
		return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr));
}

/* Maximum number of SGL entries we could stuff in a WR */
static inline int
max_dsgl_nsegs(int tx_credits)
{
	int nseg = 1;	/* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */
	int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS;

	KASSERT(tx_credits >= 0 &&
	    tx_credits <= MAX_OFLD_TX_CREDITS,
	    ("%s: %d credits", __func__, tx_credits));

	if (tx_credits < MIN_OFLD_TX_CREDITS)
		return (0);

	nseg += 2 * (sge_pair_credits * 16 / 24);
	if ((sge_pair_credits * 16) % 24 == 16)
		nseg++;

	return (nseg);
}

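/*
 * Illustrative reading of the credit arithmetic in the two helpers above: a
 * tx credit is 16 bytes of work-request space, so MIN_OFLD_TX_CREDITS is
 * the smallest WR that holds the fw_ofld_tx_data_wr header plus at least
 * one byte of payload.  For DSGL sizing, the credits left after the header
 * buy ulptx_sge_pair structures at 24 bytes per pair of segments (on top of
 * the one segment carried by the ulptx_sgl header itself), and a 16-byte
 * remainder is enough for one unpaired segment.
 */
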
static inline void
write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen,
    unsigned int plen, uint8_t credits, int more_to_come)
{
	struct fw_ofld_tx_data_wr *txwr = dst;
	int shove = !more_to_come;
	int compl = 1;

	/*
	 * We always request completion notifications from the firmware.  The
	 * only exception is when we know we'll get more data to send shortly
	 * and that we'll have some tx credits remaining to transmit that data.
	 */
	if (more_to_come && toep->tx_credits - credits >= MIN_OFLD_TX_CREDITS)
		compl = 0;

	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) |
	    V_FW_WR_COMPL(compl) | V_FW_WR_IMMDLEN(immdlen));
	txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) |
	    V_FW_WR_LEN16(credits));
	txwr->tunnel_to_proxy =
	    htobe32(V_FW_OFLD_TX_DATA_WR_ULPMODE(toep->ulp_mode) |
		V_FW_OFLD_TX_DATA_WR_URGENT(0) |	/* XXX */
		V_FW_OFLD_TX_DATA_WR_SHOVE(shove));
	txwr->plen = htobe32(plen);
}

/*
 * Generate a DSGL from a starting mbuf.  The total number of segments and the
 * maximum segments in any one mbuf are provided.
 */
static void
write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n)
{
	struct mbuf *m;
	struct ulptx_sgl *usgl = dst;
	int i, j, rc;
	struct sglist sg;
	struct sglist_seg segs[n];

	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

	sglist_init(&sg, n, segs);
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));

	i = -1;
	for (m = start; m != stop; m = m->m_next) {
		rc = sglist_append(&sg, mtod(m, void *), m->m_len);
		if (__predict_false(rc != 0))
			panic("%s: sglist_append %d", __func__, rc);

		for (j = 0; j < sg.sg_nseg; i++, j++) {
			if (i < 0) {
				usgl->len0 = htobe32(segs[j].ss_len);
				usgl->addr0 = htobe64(segs[j].ss_paddr);
			} else {
				usgl->sge[i / 2].len[i & 1] =
				    htobe32(segs[j].ss_len);
				usgl->sge[i / 2].addr[i & 1] =
				    htobe64(segs[j].ss_paddr);
			}
#ifdef INVARIANTS
			nsegs--;
#endif
		}
		sglist_reset(&sg);
	}
	if (i & 1)
		usgl->sge[i / 2].len[1] = htobe32(0);
	KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p",
	    __func__, nsegs, start, stop));
}

/*
 * Max number of SGL entries an offload tx work request can have.  This is 41
 * (1 + 40) for a full 512B work request.
 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40)
 */
#define OFLD_SGL_LEN (41)

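/*
 * Overview of the transmit loop below (informal): each pass builds one work
 * request covering as much of the send buffer as the available tx credits
 * allow.  Payloads up to max_imm_payload() are copied inline into the WR
 * (immediate data); anything larger is described by a DSGL and fetched by
 * the chip via DMA.  The loop suspends itself (TPF_TX_SUSPENDED) when it
 * runs out of credits and is restarted from do_fw4_ack once credits return.
 */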
/*
 * Send data and/or a FIN to the peer.
 *
 * The socket's so_snd buffer consists of a stream of data starting with sb_mb
 * and linked together with m_next.  sb_sndptr, if set, is the last mbuf that
 * was transmitted.
 */
static void
t4_push_frames(struct adapter *sc, struct toepcb *toep)
{
	struct mbuf *sndptr, *m, *sb_sndptr;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	unsigned int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_snd;
	int tx_credits;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	if (__predict_false(toep->ulp_mode != ULP_MODE_NONE &&
	    toep->ulp_mode != ULP_MODE_TCPDDP))
		CXGBE_UNIMPLEMENTED("ulp_mode");

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED))
		return;

	do {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		SOCKBUF_LOCK(sb);
		sb_sndptr = sb->sb_sndptr;
		sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb;
		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/* This mbuf sent us _over_ the nsegs limit, back out */
			if (plen > max_imm && nsegs > max_nsegs) {
				nsegs -= n;
				plen -= m->m_len;
				if (plen == 0) {
					/* Too few credits */
					toep->flags |= TPF_TX_SUSPENDED;
					SOCKBUF_UNLOCK(sb);
					return;
				}
				break;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
			sb_sndptr = m;	/* new sb->sb_sndptr if all goes well */

			/* This mbuf put us right at the max_nsegs limit */
			if (plen > max_imm && nsegs == max_nsegs) {
				m = m->m_next;
				break;
			}
		}

		if (sb->sb_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf &&
		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
		    sbspace(sb) < sb->sb_hiwat / 8 * 7) {
			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max);

			if (!sbreserve_locked(sb, newsize, so, NULL))
				sb->sb_flags &= ~SB_AUTOSIZE;
			else {
				sowwakeup_locked(so);	/* room available */
				SOCKBUF_UNLOCK_ASSERT(sb);
				goto unlocked;
			}
		}
		SOCKBUF_UNLOCK(sb);
unlocked:

		/* nothing to send */
		if (plen == 0) {
			KASSERT(m == NULL,
			    ("%s: nothing to send, but m != NULL", __func__));
			break;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		if (plen <= max_imm) {

			/* Immediate data tx */

			wr = alloc_wrqe(roundup(sizeof(*txwr) + plen, 16),
			    toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, plen, credits,
			    tp->t_flags & TF_MORETOCOME);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
		} else {
			int wr_len;

			/* DSGL tx */

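			/*
			 * WR length, informally: the first SGE lives in the
			 * ulptx_sgl header itself; every further pair of SGEs
			 * takes a 24-byte ulptx_sge_pair, and a final unpaired
			 * SGE takes 16 bytes more.
			 */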
			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, plen, credits,
			    tp->t_flags & TF_MORETOCOME);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));

		toep->tx_credits -= credits;

		tp->snd_nxt += plen;
		tp->snd_max += plen;

		SOCKBUF_LOCK(sb);
		KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__));
		sb->sb_sndptr = sb_sndptr;
		SOCKBUF_UNLOCK(sb);

		toep->flags |= TPF_TX_DATA_SENT;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	} while (m != NULL);

	/* Send a FIN if requested, but only if there's no more data to send */
	if (m == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}

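/*
 * The three functions below are the transmit-side entry points that the
 * kernel's TCP stack reaches through the toedev: send pending data, send a
 * FIN, or send an RST.  All of them funnel into t4_push_frames or
 * send_reset.
 */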
int
t4_tod_output(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	t4_push_frames(sc, toep);

	return (0);
}

int
t4_send_fin(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	toep->flags |= TPF_SEND_FIN;
	t4_push_frames(sc, toep);

	return (0);
}

int
t4_send_rst(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#if defined(INVARIANTS)
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	/* hmmmm */
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc for tid %u [%s] not sent already",
	    __func__, toep->tid, tcpstates[tp->t_state]));

	send_reset(sc, toep, 0);
	return (0);
}

/*
 * Peer has sent us a FIN.
 */
static int
do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_peer_close *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so;
	struct sockbuf *sb;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_PEER_CLOSE,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_INFO_WLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__,
	    tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	tp->rcv_nxt++;	/* FIN */

	so = inp->inp_socket;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	if (__predict_false(toep->ddp_flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) {
		m = m_get(M_NOWAIT, MT_DATA);
		if (m == NULL)
			CXGBE_UNIMPLEMENTED("mbuf alloc failure");

		m->m_len = be32toh(cpl->rcv_nxt) - tp->rcv_nxt;
		m->m_flags |= M_DDP;	/* Data is already where it should be */
		m->m_data = "nothing to see here";
		tp->rcv_nxt = be32toh(cpl->rcv_nxt);

		toep->ddp_flags &= ~(DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE);

		KASSERT(toep->sb_cc >= sb->sb_cc,
		    ("%s: sb %p has more data (%d) than last time (%d).",
		    __func__, sb, sb->sb_cc, toep->sb_cc));
		toep->rx_credits += toep->sb_cc - sb->sb_cc;
#ifdef USE_DDP_RX_FLOW_CONTROL
		toep->rx_credits -= m->m_len;	/* adjust for F_RX_FC_DDP */
#endif
		sbappendstream_locked(sb, m);
		toep->sb_cc = sb->sb_cc;
	}
	socantrcvmore_locked(so);	/* unlocks the sockbuf */

	KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt),
	    ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt,
	    be32toh(cpl->rcv_nxt)));

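	/*
	 * The peer's FIN moves us along the usual TCP state diagram:
	 * ESTABLISHED -> CLOSE_WAIT, FIN_WAIT_1 -> CLOSING, and
	 * FIN_WAIT_2 -> TIME_WAIT (via tcp_twstart).
	 */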
	switch (tp->t_state) {
	case TCPS_SYN_RECEIVED:
		tp->t_starttime = ticks;
		/* FALLTHROUGH */

	case TCPS_ESTABLISHED:
		tp->t_state = TCPS_CLOSE_WAIT;
		break;

	case TCPS_FIN_WAIT_1:
		tp->t_state = TCPS_CLOSING;
		break;

	case TCPS_FIN_WAIT_2:
		tcp_twstart(tp);
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the inp */
		INP_INFO_WUNLOCK(&V_tcbinfo);

		INP_WLOCK(inp);
		final_cpl_received(toep);
		return (0);

	default:
		log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n",
		    __func__, tid, tp->t_state);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_tcbinfo);
	return (0);
}

/*
 * Peer has ACK'd our FIN.
 */
static int
do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_CLOSE_CON_RPL,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_INFO_WLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	so = inp->inp_socket;
	tp->snd_una = be32toh(cpl->snd_nxt) - 1;	/* exclude FIN */

	switch (tp->t_state) {
	case TCPS_CLOSING:	/* see TCPS_FIN_WAIT_2 in do_peer_close too */
		tcp_twstart(tp);
release:
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the inp */
		INP_INFO_WUNLOCK(&V_tcbinfo);

		INP_WLOCK(inp);
		final_cpl_received(toep);	/* no more CPLs expected */

		return (0);
	case TCPS_LAST_ACK:
		if (tcp_close(tp))
			INP_WUNLOCK(inp);
		goto release;

	case TCPS_FIN_WAIT_1:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			soisdisconnected(so);
		tp->t_state = TCPS_FIN_WAIT_2;
		break;

	default:
		log(LOG_ERR,
		    "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n",
		    __func__, tid, tcpstates[tp->t_state]);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_tcbinfo);
	return (0);
}

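/*
 * Send a CPL_ABORT_RPL back to the chip.  The hardware expects one in
 * response to every CPL_ABORT_REQ_RSS it delivers; rst_status (e.g.
 * CPL_ABORT_NO_RST) controls whether the chip also puts an RST on the wire.
 */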
void
send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid,
    int rst_status)
{
	struct wrqe *wr;
	struct cpl_abort_rpl *cpl;

	wr = alloc_wrqe(sizeof(*cpl), ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	cpl = wrtod(wr);

	INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid);
	cpl->cmd = rst_status;

	t4_wrq_tx(sc, wr);
}

static int
abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN:
	case CPL_ERR_CONN_RESET:
		return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET);
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return (ETIMEDOUT);
	default:
		return (EIO);
	}
}

/*
 * TCP RST from the peer, timeout, or some other such critical error.
 */
static int
do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct sge_wrq *ofld_txq = toep->ofld_txq;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct socket *so;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_REQ_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (toep->flags & TPF_SYNQE)
		return (do_abort_req_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	if (cpl->status == CPL_ERR_RTX_NEG_ADVICE ||
	    cpl->status == CPL_ERR_PERSIST_NEG_ADVICE) {
		CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)",
		    __func__, cpl->status, tid, toep->flags);
		return (0);	/* Ignore negative advice */
	}

	inp = toep->inp;
	INP_INFO_WLOCK(&V_tcbinfo);	/* for tcp_close */
	INP_WLOCK(inp);

	tp = intotcpcb(inp);
	so = inp->inp_socket;

	CTR6(KTR_CXGBE,
	    "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags,
	    inp->inp_flags, cpl->status);

	/*
	 * If we'd initiated an abort earlier, the reply to it is responsible
	 * for cleaning up resources.  Otherwise we tear everything down right
	 * here right now.  We owe the T4 a CPL_ABORT_RPL no matter what.
	 */
	if (toep->flags & TPF_ABORT_SHUTDOWN) {
		INP_WUNLOCK(inp);
		goto done;
	}
	toep->flags |= TPF_ABORT_SHUTDOWN;

	so_error_set(so, abort_status_to_errno(tp, cpl->status));
	tp = tcp_close(tp);
	if (tp == NULL)
		INP_WLOCK(inp);	/* re-acquire */

	final_cpl_received(toep);
done:
	INP_INFO_WUNLOCK(&V_tcbinfo);
	send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST);
	return (0);
}

/*
 * Reply to the CPL_ABORT_REQ we sent earlier (see send_reset).
 */
static int
do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_RPL_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (toep->flags & TPF_SYNQE)
		return (do_abort_rpl_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d",
	    __func__, tid, toep, inp, cpl->status);

	KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
	    ("%s: wasn't expecting abort reply", __func__));

	INP_WLOCK(inp);
	final_cpl_received(toep);

	return (0);
}

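/*
 * The chip has delivered received payload (CPL_RX_DATA).  Append it to the
 * socket buffer, update the window and credit accounting, and drive the DDP
 * state machine for connections that are set up to use it.
 */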
static int
do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_data *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	int len;

	if (__predict_false(toep->flags & TPF_SYNQE)) {
		/*
		 * do_pass_establish failed and must be attempting to abort the
		 * synqe's tid.  Meanwhile, the T4 has sent us data for such a
		 * connection.
		 */
		KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
		    ("%s: synqe and tid isn't being aborted.", __func__));
		m_freem(m);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
	len = m->m_pkthdr.len;

	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	tp = intotcpcb(inp);

#ifdef INVARIANTS
	if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq))) {
		log(LOG_ERR,
		    "%s: unexpected seq# %x for TID %u, rcv_nxt %x\n",
		    __func__, be32toh(cpl->seq), toep->tid, tp->rcv_nxt);
	}
#endif

	tp->rcv_nxt += len;
	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
	tp->rcv_wnd -= len;
	tp->t_rcvtime = ticks;

	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
		CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
		    __func__, tid, len);
		m_freem(m);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);

		INP_INFO_WLOCK(&V_tcbinfo);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp)
			INP_WUNLOCK(inp);
		INP_INFO_WUNLOCK(&V_tcbinfo);

		return (0);
	}

	/* receive buffer autosize */
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    len > (sbspace(sb) / 8 * 7)) {
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(sb, newsize, so, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
		else
			toep->rx_credits += newsize - hiwat;
	}

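	/*
	 * DDP bookkeeping, informally: notice DDP on/off transitions that
	 * the chip reports, re-arm DDP_OK after the cooling-off period, and
	 * then either flag this data as an indicate or switch DDP on when
	 * conditions look favorable.
	 */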
	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		int changed = !(toep->ddp_flags & DDP_ON) ^ cpl->ddp_off;

		if (changed) {
			if (__predict_false(!(toep->ddp_flags & DDP_SC_REQ))) {
				/* XXX: handle this if legitimate */
				panic("%s: unexpected DDP state change %d",
				    __func__, cpl->ddp_off);
			}
			toep->ddp_flags ^= DDP_ON | DDP_SC_REQ;
		}

		if ((toep->ddp_flags & DDP_OK) == 0 &&
		    time_uptime >= toep->ddp_disabled + DDP_RETRY_WAIT) {
			toep->ddp_score = DDP_LOW_SCORE;
			toep->ddp_flags |= DDP_OK;
			CTR3(KTR_CXGBE, "%s: tid %u DDP_OK @ %u",
			    __func__, tid, time_uptime);
		}

		if (toep->ddp_flags & DDP_ON) {

			/*
			 * CPL_RX_DATA with DDP on can only be an indicate.  Ask
			 * soreceive to post a buffer or disable DDP.  The
			 * payload that arrived in this indicate is appended to
			 * the socket buffer as usual.
			 */

#if 0
			CTR5(KTR_CXGBE,
			    "%s: tid %u (0x%x) DDP indicate (seq 0x%x, len %d)",
			    __func__, tid, toep->flags, be32toh(cpl->seq), len);
#endif
			sb->sb_flags |= SB_DDP_INDICATE;
		} else if ((toep->ddp_flags & (DDP_OK|DDP_SC_REQ)) == DDP_OK &&
		    tp->rcv_wnd > DDP_RSVD_WIN && len >= sc->tt.ddp_thres) {

			/*
			 * DDP allowed but isn't on (and a request to switch it
			 * on isn't pending either), and conditions are ripe for
			 * it to work.  Switch it on.
			 */

			enable_ddp(sc, toep);
		}
	}

	KASSERT(toep->sb_cc >= sb->sb_cc,
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sb->sb_cc, toep->sb_cc));
	toep->rx_credits += toep->sb_cc - sb->sb_cc;
	sbappendstream_locked(sb, m);
	toep->sb_cc = sb->sb_cc;
	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);

	INP_WUNLOCK(inp);
	return (0);
}

#define S_CPL_FW4_ACK_OPCODE 24
#define M_CPL_FW4_ACK_OPCODE 0xff
#define V_CPL_FW4_ACK_OPCODE(x) ((x) << S_CPL_FW4_ACK_OPCODE)
#define G_CPL_FW4_ACK_OPCODE(x) \
    (((x) >> S_CPL_FW4_ACK_OPCODE) & M_CPL_FW4_ACK_OPCODE)

#define S_CPL_FW4_ACK_FLOWID 0
#define M_CPL_FW4_ACK_FLOWID 0xffffff
#define V_CPL_FW4_ACK_FLOWID(x) ((x) << S_CPL_FW4_ACK_FLOWID)
#define G_CPL_FW4_ACK_FLOWID(x) \
    (((x) >> S_CPL_FW4_ACK_FLOWID) & M_CPL_FW4_ACK_FLOWID)

#define S_CPL_FW4_ACK_CR 24
#define M_CPL_FW4_ACK_CR 0xff
#define V_CPL_FW4_ACK_CR(x) ((x) << S_CPL_FW4_ACK_CR)
#define G_CPL_FW4_ACK_CR(x) (((x) >> S_CPL_FW4_ACK_CR) & M_CPL_FW4_ACK_CR)

#define S_CPL_FW4_ACK_SEQVAL 0
#define M_CPL_FW4_ACK_SEQVAL 0x1
#define V_CPL_FW4_ACK_SEQVAL(x) ((x) << S_CPL_FW4_ACK_SEQVAL)
#define G_CPL_FW4_ACK_SEQVAL(x) \
    (((x) >> S_CPL_FW4_ACK_SEQVAL) & M_CPL_FW4_ACK_SEQVAL)
#define F_CPL_FW4_ACK_SEQVAL V_CPL_FW4_ACK_SEQVAL(1U)

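/*
 * The firmware is acknowledging completed tx work requests (CPL_FW4_ACK).
 * Reclaim the tx credits they held, drop the corresponding bytes from the
 * send buffer, and resume transmission if it was suspended for lack of
 * credits.
 */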
static int
do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp;
	struct tcpcb *tp;
	struct socket *so;
	uint8_t credits = cpl->credits;
	struct ofld_tx_sdesc *txsd;
	int plen;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	/*
	 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and
	 * now this comes back carrying the credits for the flowc.
	 */
	if (__predict_false(toep->flags & TPF_SYNQE)) {
		KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
		    ("%s: credits for a synq entry %p", __func__, toep));
		return (0);
	}

	inp = toep->inp;

	KASSERT(opcode == CPL_FW4_ACK,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_WLOCK(inp);

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) {
		INP_WUNLOCK(inp);
		return (0);
	}

	KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0,
	    ("%s: inp_flags 0x%x", __func__, inp->inp_flags));

	tp = intotcpcb(inp);

	if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) {
		tcp_seq snd_una = be32toh(cpl->snd_una);

#ifdef INVARIANTS
		if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) {
			log(LOG_ERR,
			    "%s: unexpected seq# %x for TID %u, snd_una %x\n",
			    __func__, snd_una, toep->tid, tp->snd_una);
		}
#endif

		if (tp->snd_una != snd_una) {
			tp->snd_una = snd_una;
			tp->ts_recent_age = tcp_ts_getticks();
		}
	}

	so = inp->inp_socket;
	txsd = &toep->txsd[toep->txsd_cidx];
	plen = 0;
	while (credits) {
		KASSERT(credits >= txsd->tx_credits,
		    ("%s: too many (or partial) credits", __func__));
		credits -= txsd->tx_credits;
		toep->tx_credits += txsd->tx_credits;
		plen += txsd->plen;
		txsd++;
		toep->txsd_avail++;
		KASSERT(toep->txsd_avail <= toep->txsd_total,
		    ("%s: txsd avail > total", __func__));
		if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) {
			txsd = &toep->txsd[0];
			toep->txsd_cidx = 0;
		}
	}

	if (plen > 0) {
		struct sockbuf *sb = &so->so_snd;

		SOCKBUF_LOCK(sb);
		sbdrop_locked(sb, plen);
		sowwakeup_locked(so);
		SOCKBUF_UNLOCK_ASSERT(sb);
	}

	/* XXX */
	if ((toep->flags & TPF_TX_SUSPENDED &&
	    toep->tx_credits >= MIN_OFLD_TX_CREDITS) ||
	    toep->tx_credits == toep->txsd_total *
	    howmany((sizeof(struct fw_ofld_tx_data_wr) + 1), 16)) {
		toep->flags &= ~TPF_TX_SUSPENDED;
		t4_push_frames(sc, toep);
	}
	INP_WUNLOCK(inp);

	return (0);
}

static int
do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_SET_TCB_RPL,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (tid >= sc->tids.ftid_base &&
	    tid < sc->tids.ftid_base + sc->tids.nftids)
		return (t4_filter_rpl(iq, rss, m));	/* TCB is a filter */

	CXGBE_UNIMPLEMENTED(__func__);
}

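/*
 * Update one field of the connection's TCB with a CPL_SET_TCB_FIELD request.
 * No reply is requested from the hardware (V_NO_REPLY(1)).
 */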
void
t4_set_tcb_field(struct adapter *sc, struct toepcb *toep, uint16_t word,
    uint64_t mask, uint64_t val)
{
	struct wrqe *wr;
	struct cpl_set_tcb_field *req;

	wr = alloc_wrqe(sizeof(*req), toep->ctrlq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	t4_wrq_tx(sc, wr);
}

void
t4_init_cpl_io_handlers(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_PEER_CLOSE, do_peer_close);
	t4_register_cpl_handler(sc, CPL_CLOSE_CON_RPL, do_close_con_rpl);
	t4_register_cpl_handler(sc, CPL_ABORT_REQ_RSS, do_abort_req);
	t4_register_cpl_handler(sc, CPL_ABORT_RPL_RSS, do_abort_rpl);
	t4_register_cpl_handler(sc, CPL_RX_DATA, do_rx_data);
	t4_register_cpl_handler(sc, CPL_FW4_ACK, do_fw4_ack);
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, do_set_tcb_rpl);
}

void
t4_uninit_cpl_io_handlers(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
}
#endif