1 /*- 2 * Copyright (c) 2012 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_inet.h" 32 33 #ifdef TCP_OFFLOAD 34 #include <sys/param.h> 35 #include <sys/types.h> 36 #include <sys/kernel.h> 37 #include <sys/ktr.h> 38 #include <sys/module.h> 39 #include <sys/protosw.h> 40 #include <sys/domain.h> 41 #include <sys/socket.h> 42 #include <sys/socketvar.h> 43 #include <sys/sglist.h> 44 #include <netinet/in.h> 45 #include <netinet/in_pcb.h> 46 #include <netinet/ip.h> 47 #include <netinet/ip6.h> 48 #include <netinet/tcp_var.h> 49 #define TCPSTATES 50 #include <netinet/tcp_fsm.h> 51 #include <netinet/tcp_seq.h> 52 #include <netinet/toecore.h> 53 54 #include "common/common.h" 55 #include "common/t4_msg.h" 56 #include "common/t4_regs.h" 57 #include "common/t4_tcb.h" 58 #include "tom/t4_tom_l2t.h" 59 #include "tom/t4_tom.h" 60 61 VNET_DECLARE(int, tcp_do_autosndbuf); 62 #define V_tcp_do_autosndbuf VNET(tcp_do_autosndbuf) 63 VNET_DECLARE(int, tcp_autosndbuf_inc); 64 #define V_tcp_autosndbuf_inc VNET(tcp_autosndbuf_inc) 65 VNET_DECLARE(int, tcp_autosndbuf_max); 66 #define V_tcp_autosndbuf_max VNET(tcp_autosndbuf_max) 67 VNET_DECLARE(int, tcp_do_autorcvbuf); 68 #define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf) 69 VNET_DECLARE(int, tcp_autorcvbuf_inc); 70 #define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc) 71 VNET_DECLARE(int, tcp_autorcvbuf_max); 72 #define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max) 73 74 /* 75 * For ULP connections HW may add headers, e.g., for digests, that aren't part 76 * of the messages sent by the host but that are part of the TCP payload and 77 * therefore consume TCP sequence space. Tx connection parameters that 78 * operate in TCP sequence space are affected by the HW additions and need to 79 * compensate for them to accurately track TCP sequence numbers. This array 80 * contains the compensating extra lengths for ULP packets. It is indexed by 81 * a packet's ULP submode. 82 */ 83 const unsigned int t4_ulp_extra_len[] = {0, 4, 4, 8}; 84 85 /* 86 * Return the length of any HW additions that will be made to a Tx packet. 87 * Such additions can happen for some types of ULP packets. 88 */ 89 static inline unsigned int 90 ulp_extra_len(struct mbuf *m, int *ulp_mode) 91 { 92 struct m_tag *mtag; 93 94 if ((mtag = m_tag_find(m, CXGBE_ISCSI_MBUF_TAG, NULL)) == NULL) 95 return (0); 96 *ulp_mode = *((int *)(mtag + 1)); 97 98 return (t4_ulp_extra_len[*ulp_mode & 3]); 99 } 100 101 void 102 send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp) 103 { 104 struct wrqe *wr; 105 struct fw_flowc_wr *flowc; 106 unsigned int nparams = ftxp ? 8 : 6, flowclen; 107 struct port_info *pi = toep->port; 108 struct adapter *sc = pi->adapter; 109 unsigned int pfvf = G_FW_VIID_PFN(pi->viid) << S_FW_VIID_PFN; 110 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 111 112 KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT), 113 ("%s: flowc for tid %u sent already", __func__, toep->tid)); 114 115 flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval); 116 117 wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq); 118 if (wr == NULL) { 119 /* XXX */ 120 panic("%s: allocation failure.", __func__); 121 } 122 flowc = wrtod(wr); 123 memset(flowc, 0, wr->wr_len); 124 125 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 126 V_FW_FLOWC_WR_NPARAMS(nparams)); 127 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) | 128 V_FW_WR_FLOWID(toep->tid)); 129 130 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 131 flowc->mnemval[0].val = htobe32(pfvf); 132 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 133 flowc->mnemval[1].val = htobe32(pi->tx_chan); 134 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 135 flowc->mnemval[2].val = htobe32(pi->tx_chan); 136 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 137 flowc->mnemval[3].val = htobe32(toep->ofld_rxq->iq.abs_id); 138 if (ftxp) { 139 uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf); 140 141 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; 142 flowc->mnemval[4].val = htobe32(ftxp->snd_nxt); 143 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 144 flowc->mnemval[5].val = htobe32(ftxp->rcv_nxt); 145 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 146 flowc->mnemval[6].val = htobe32(sndbuf); 147 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 148 flowc->mnemval[7].val = htobe32(ftxp->mss); 149 150 CTR6(KTR_CXGBE, 151 "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x", 152 __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt, 153 ftxp->rcv_nxt); 154 } else { 155 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF; 156 flowc->mnemval[4].val = htobe32(512); 157 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS; 158 flowc->mnemval[5].val = htobe32(512); 159 160 CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid); 161 } 162 163 txsd->tx_credits = howmany(flowclen, 16); 164 txsd->plen = 0; 165 KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0, 166 ("%s: not enough credits (%d)", __func__, toep->tx_credits)); 167 toep->tx_credits -= txsd->tx_credits; 168 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) 169 toep->txsd_pidx = 0; 170 toep->txsd_avail--; 171 172 toep->flags |= TPF_FLOWC_WR_SENT; 173 t4_wrq_tx(sc, wr); 174 } 175 176 void 177 send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt) 178 { 179 struct wrqe *wr; 180 struct cpl_abort_req *req; 181 int tid = toep->tid; 182 struct inpcb *inp = toep->inp; 183 struct tcpcb *tp = intotcpcb(inp); /* don't use if INP_DROPPED */ 184 185 INP_WLOCK_ASSERT(inp); 186 187 CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s", 188 __func__, toep->tid, 189 inp->inp_flags & INP_DROPPED ? "inp dropped" : 190 tcpstates[tp->t_state], 191 toep->flags, inp->inp_flags, 192 toep->flags & TPF_ABORT_SHUTDOWN ? 193 " (abort already in progress)" : ""); 194 195 if (toep->flags & TPF_ABORT_SHUTDOWN) 196 return; /* abort already in progress */ 197 198 toep->flags |= TPF_ABORT_SHUTDOWN; 199 200 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 201 ("%s: flowc_wr not sent for tid %d.", __func__, tid)); 202 203 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 204 if (wr == NULL) { 205 /* XXX */ 206 panic("%s: allocation failure.", __func__); 207 } 208 req = wrtod(wr); 209 210 INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid); 211 if (inp->inp_flags & INP_DROPPED) 212 req->rsvd0 = htobe32(snd_nxt); 213 else 214 req->rsvd0 = htobe32(tp->snd_nxt); 215 req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT); 216 req->cmd = CPL_ABORT_SEND_RST; 217 218 /* 219 * XXX: What's the correct way to tell that the inp hasn't been detached 220 * from its socket? Should I even be flushing the snd buffer here? 221 */ 222 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 223 struct socket *so = inp->inp_socket; 224 225 if (so != NULL) /* because I'm not sure. See comment above */ 226 sbflush(&so->so_snd); 227 } 228 229 t4_l2t_send(sc, wr, toep->l2te); 230 } 231 232 /* 233 * Called when a connection is established to translate the TCP options 234 * reported by HW to FreeBSD's native format. 235 */ 236 static void 237 assign_rxopt(struct tcpcb *tp, unsigned int opt) 238 { 239 struct toepcb *toep = tp->t_toe; 240 struct inpcb *inp = tp->t_inpcb; 241 struct adapter *sc = td_adapter(toep->td); 242 int n; 243 244 INP_LOCK_ASSERT(inp); 245 246 if (inp->inp_inc.inc_flags & INC_ISIPV6) 247 n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 248 else 249 n = sizeof(struct ip) + sizeof(struct tcphdr); 250 tp->t_maxseg = tp->t_maxopd = sc->params.mtus[G_TCPOPT_MSS(opt)] - n; 251 252 CTR4(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u)", __func__, toep->tid, 253 G_TCPOPT_MSS(opt), sc->params.mtus[G_TCPOPT_MSS(opt)]); 254 255 if (G_TCPOPT_TSTAMP(opt)) { 256 tp->t_flags |= TF_RCVD_TSTMP; /* timestamps ok */ 257 tp->ts_recent = 0; /* hmmm */ 258 tp->ts_recent_age = tcp_ts_getticks(); 259 tp->t_maxseg -= TCPOLEN_TSTAMP_APPA; 260 } 261 262 if (G_TCPOPT_SACK(opt)) 263 tp->t_flags |= TF_SACK_PERMIT; /* should already be set */ 264 else 265 tp->t_flags &= ~TF_SACK_PERMIT; /* sack disallowed by peer */ 266 267 if (G_TCPOPT_WSCALE_OK(opt)) 268 tp->t_flags |= TF_RCVD_SCALE; 269 270 /* Doing window scaling? */ 271 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 272 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 273 tp->rcv_scale = tp->request_r_scale; 274 tp->snd_scale = G_TCPOPT_SND_WSCALE(opt); 275 } 276 } 277 278 /* 279 * Completes some final bits of initialization for just established connections 280 * and changes their state to TCPS_ESTABLISHED. 281 * 282 * The ISNs are from after the exchange of SYNs. i.e., the true ISN + 1. 283 */ 284 void 285 make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn, 286 uint16_t opt) 287 { 288 struct inpcb *inp = toep->inp; 289 struct socket *so = inp->inp_socket; 290 struct tcpcb *tp = intotcpcb(inp); 291 long bufsize; 292 uint32_t iss = be32toh(snd_isn) - 1; /* true ISS */ 293 uint32_t irs = be32toh(rcv_isn) - 1; /* true IRS */ 294 uint16_t tcpopt = be16toh(opt); 295 struct flowc_tx_params ftxp; 296 297 INP_WLOCK_ASSERT(inp); 298 KASSERT(tp->t_state == TCPS_SYN_SENT || 299 tp->t_state == TCPS_SYN_RECEIVED, 300 ("%s: TCP state %s", __func__, tcpstates[tp->t_state])); 301 302 CTR4(KTR_CXGBE, "%s: tid %d, toep %p, inp %p", 303 __func__, toep->tid, toep, inp); 304 305 tp->t_state = TCPS_ESTABLISHED; 306 tp->t_starttime = ticks; 307 TCPSTAT_INC(tcps_connects); 308 309 tp->irs = irs; 310 tcp_rcvseqinit(tp); 311 tp->rcv_wnd = toep->rx_credits << 10; 312 tp->rcv_adv += tp->rcv_wnd; 313 tp->last_ack_sent = tp->rcv_nxt; 314 315 /* 316 * If we were unable to send all rx credits via opt0, save the remainder 317 * in rx_credits so that they can be handed over with the next credit 318 * update. 319 */ 320 SOCKBUF_LOCK(&so->so_rcv); 321 bufsize = select_rcv_wnd(so); 322 SOCKBUF_UNLOCK(&so->so_rcv); 323 toep->rx_credits = bufsize - tp->rcv_wnd; 324 325 tp->iss = iss; 326 tcp_sendseqinit(tp); 327 tp->snd_una = iss + 1; 328 tp->snd_nxt = iss + 1; 329 tp->snd_max = iss + 1; 330 331 assign_rxopt(tp, tcpopt); 332 333 SOCKBUF_LOCK(&so->so_snd); 334 if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf) 335 bufsize = V_tcp_autosndbuf_max; 336 else 337 bufsize = sbspace(&so->so_snd); 338 SOCKBUF_UNLOCK(&so->so_snd); 339 340 ftxp.snd_nxt = tp->snd_nxt; 341 ftxp.rcv_nxt = tp->rcv_nxt; 342 ftxp.snd_space = bufsize; 343 ftxp.mss = tp->t_maxseg; 344 send_flowc_wr(toep, &ftxp); 345 346 soisconnected(so); 347 } 348 349 static int 350 send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits) 351 { 352 struct wrqe *wr; 353 struct cpl_rx_data_ack *req; 354 uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1); 355 356 KASSERT(credits >= 0, ("%s: %d credits", __func__, credits)); 357 358 wr = alloc_wrqe(sizeof(*req), toep->ctrlq); 359 if (wr == NULL) 360 return (0); 361 req = wrtod(wr); 362 363 INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid); 364 req->credit_dack = htobe32(dack | V_RX_CREDITS(credits)); 365 366 t4_wrq_tx(sc, wr); 367 return (credits); 368 } 369 370 void 371 t4_rcvd(struct toedev *tod, struct tcpcb *tp) 372 { 373 struct adapter *sc = tod->tod_softc; 374 struct inpcb *inp = tp->t_inpcb; 375 struct socket *so = inp->inp_socket; 376 struct sockbuf *sb = &so->so_rcv; 377 struct toepcb *toep = tp->t_toe; 378 int credits; 379 380 INP_WLOCK_ASSERT(inp); 381 382 SOCKBUF_LOCK(sb); 383 KASSERT(toep->sb_cc >= sbused(sb), 384 ("%s: sb %p has more data (%d) than last time (%d).", 385 __func__, sb, sbused(sb), toep->sb_cc)); 386 if (toep->ulp_mode == ULP_MODE_ISCSI) { 387 toep->rx_credits += toep->sb_cc; 388 toep->sb_cc = 0; 389 } else { 390 toep->rx_credits += toep->sb_cc - sbused(sb); 391 toep->sb_cc = sbused(sb); 392 } 393 if (toep->rx_credits > 0 && 394 (tp->rcv_wnd <= 32 * 1024 || toep->rx_credits >= 64 * 1024 || 395 (toep->rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) || 396 toep->sb_cc + tp->rcv_wnd < sb->sb_lowat)) { 397 398 credits = send_rx_credits(sc, toep, toep->rx_credits); 399 toep->rx_credits -= credits; 400 tp->rcv_wnd += credits; 401 tp->rcv_adv += credits; 402 } 403 SOCKBUF_UNLOCK(sb); 404 } 405 406 /* 407 * Close a connection by sending a CPL_CLOSE_CON_REQ message. 408 */ 409 static int 410 close_conn(struct adapter *sc, struct toepcb *toep) 411 { 412 struct wrqe *wr; 413 struct cpl_close_con_req *req; 414 unsigned int tid = toep->tid; 415 416 CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid, 417 toep->flags & TPF_FIN_SENT ? ", IGNORED" : ""); 418 419 if (toep->flags & TPF_FIN_SENT) 420 return (0); 421 422 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 423 ("%s: flowc_wr not sent for tid %u.", __func__, tid)); 424 425 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 426 if (wr == NULL) { 427 /* XXX */ 428 panic("%s: allocation failure.", __func__); 429 } 430 req = wrtod(wr); 431 432 req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) | 433 V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr))); 434 req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) | 435 V_FW_WR_FLOWID(tid)); 436 req->wr.wr_lo = cpu_to_be64(0); 437 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); 438 req->rsvd = 0; 439 440 toep->flags |= TPF_FIN_SENT; 441 toep->flags &= ~TPF_SEND_FIN; 442 t4_l2t_send(sc, wr, toep->l2te); 443 444 return (0); 445 } 446 447 #define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16) 448 #define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16)) 449 450 /* Maximum amount of immediate data we could stuff in a WR */ 451 static inline int 452 max_imm_payload(int tx_credits) 453 { 454 const int n = 2; /* Use only up to 2 desc for imm. data WR */ 455 456 KASSERT(tx_credits >= 0 && 457 tx_credits <= MAX_OFLD_TX_CREDITS, 458 ("%s: %d credits", __func__, tx_credits)); 459 460 if (tx_credits < MIN_OFLD_TX_CREDITS) 461 return (0); 462 463 if (tx_credits >= (n * EQ_ESIZE) / 16) 464 return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr)); 465 else 466 return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr)); 467 } 468 469 /* Maximum number of SGL entries we could stuff in a WR */ 470 static inline int 471 max_dsgl_nsegs(int tx_credits) 472 { 473 int nseg = 1; /* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */ 474 int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS; 475 476 KASSERT(tx_credits >= 0 && 477 tx_credits <= MAX_OFLD_TX_CREDITS, 478 ("%s: %d credits", __func__, tx_credits)); 479 480 if (tx_credits < MIN_OFLD_TX_CREDITS) 481 return (0); 482 483 nseg += 2 * (sge_pair_credits * 16 / 24); 484 if ((sge_pair_credits * 16) % 24 == 16) 485 nseg++; 486 487 return (nseg); 488 } 489 490 static inline void 491 write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen, 492 unsigned int plen, uint8_t credits, int shove, int ulp_mode, int txalign) 493 { 494 struct fw_ofld_tx_data_wr *txwr = dst; 495 unsigned int wr_ulp_mode; 496 497 txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) | 498 V_FW_WR_IMMDLEN(immdlen)); 499 txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) | 500 V_FW_WR_LEN16(credits)); 501 502 /* for iscsi, the mode & submode setting is per-packet */ 503 if (toep->ulp_mode == ULP_MODE_ISCSI) 504 wr_ulp_mode = V_FW_OFLD_TX_DATA_WR_ULPMODE(ulp_mode >> 4) | 505 V_FW_OFLD_TX_DATA_WR_ULPSUBMODE(ulp_mode & 3); 506 else 507 wr_ulp_mode = V_FW_OFLD_TX_DATA_WR_ULPMODE(toep->ulp_mode); 508 509 txwr->lsodisable_to_proxy = 510 htobe32(wr_ulp_mode | 511 V_FW_OFLD_TX_DATA_WR_URGENT(0) | /* XXX */ 512 V_FW_OFLD_TX_DATA_WR_SHOVE(shove)); 513 txwr->plen = htobe32(plen); 514 515 if (txalign > 0) { 516 struct tcpcb *tp = intotcpcb(toep->inp); 517 518 if (plen < 2 * tp->t_maxseg || is_10G_port(toep->port)) 519 txwr->lsodisable_to_proxy |= 520 htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE); 521 else 522 txwr->lsodisable_to_proxy |= 523 htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD | 524 (tp->t_flags & TF_NODELAY ? 0 : 525 F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE)); 526 } 527 } 528 529 /* 530 * Generate a DSGL from a starting mbuf. The total number of segments and the 531 * maximum segments in any one mbuf are provided. 532 */ 533 static void 534 write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n) 535 { 536 struct mbuf *m; 537 struct ulptx_sgl *usgl = dst; 538 int i, j, rc; 539 struct sglist sg; 540 struct sglist_seg segs[n]; 541 542 KASSERT(nsegs > 0, ("%s: nsegs 0", __func__)); 543 544 sglist_init(&sg, n, segs); 545 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 546 V_ULPTX_NSGE(nsegs)); 547 548 i = -1; 549 for (m = start; m != stop; m = m->m_next) { 550 rc = sglist_append(&sg, mtod(m, void *), m->m_len); 551 if (__predict_false(rc != 0)) 552 panic("%s: sglist_append %d", __func__, rc); 553 554 for (j = 0; j < sg.sg_nseg; i++, j++) { 555 if (i < 0) { 556 usgl->len0 = htobe32(segs[j].ss_len); 557 usgl->addr0 = htobe64(segs[j].ss_paddr); 558 } else { 559 usgl->sge[i / 2].len[i & 1] = 560 htobe32(segs[j].ss_len); 561 usgl->sge[i / 2].addr[i & 1] = 562 htobe64(segs[j].ss_paddr); 563 } 564 #ifdef INVARIANTS 565 nsegs--; 566 #endif 567 } 568 sglist_reset(&sg); 569 } 570 if (i & 1) 571 usgl->sge[i / 2].len[1] = htobe32(0); 572 KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p", 573 __func__, nsegs, start, stop)); 574 } 575 576 /* 577 * Max number of SGL entries an offload tx work request can have. This is 41 578 * (1 + 40) for a full 512B work request. 579 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40) 580 */ 581 #define OFLD_SGL_LEN (41) 582 583 /* 584 * Send data and/or a FIN to the peer. 585 * 586 * The socket's so_snd buffer consists of a stream of data starting with sb_mb 587 * and linked together with m_next. sb_sndptr, if set, is the last mbuf that 588 * was transmitted. 589 * 590 * drop indicates the number of bytes that should be dropped from the head of 591 * the send buffer. It is an optimization that lets do_fw4_ack avoid creating 592 * contention on the send buffer lock (before this change it used to do 593 * sowwakeup and then t4_push_frames right after that when recovering from tx 594 * stalls). When drop is set this function MUST drop the bytes and wake up any 595 * writers. 596 */ 597 void 598 t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop) 599 { 600 struct mbuf *sndptr, *m, *sb_sndptr; 601 struct fw_ofld_tx_data_wr *txwr; 602 struct wrqe *wr; 603 u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf; 604 struct inpcb *inp = toep->inp; 605 struct tcpcb *tp = intotcpcb(inp); 606 struct socket *so = inp->inp_socket; 607 struct sockbuf *sb = &so->so_snd; 608 int tx_credits, shove, compl, space, sowwakeup; 609 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 610 611 INP_WLOCK_ASSERT(inp); 612 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 613 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); 614 615 KASSERT(toep->ulp_mode == ULP_MODE_NONE || 616 toep->ulp_mode == ULP_MODE_TCPDDP || 617 toep->ulp_mode == ULP_MODE_RDMA, 618 ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep)); 619 620 /* 621 * This function doesn't resume by itself. Someone else must clear the 622 * flag and call this function. 623 */ 624 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { 625 KASSERT(drop == 0, 626 ("%s: drop (%d) != 0 but tx is suspended", __func__, drop)); 627 return; 628 } 629 630 do { 631 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); 632 max_imm = max_imm_payload(tx_credits); 633 max_nsegs = max_dsgl_nsegs(tx_credits); 634 635 SOCKBUF_LOCK(sb); 636 sowwakeup = drop; 637 if (drop) { 638 sbdrop_locked(sb, drop); 639 drop = 0; 640 } 641 sb_sndptr = sb->sb_sndptr; 642 sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb; 643 plen = 0; 644 nsegs = 0; 645 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ 646 for (m = sndptr; m != NULL; m = m->m_next) { 647 int n = sglist_count(mtod(m, void *), m->m_len); 648 649 nsegs += n; 650 plen += m->m_len; 651 652 /* This mbuf sent us _over_ the nsegs limit, back out */ 653 if (plen > max_imm && nsegs > max_nsegs) { 654 nsegs -= n; 655 plen -= m->m_len; 656 if (plen == 0) { 657 /* Too few credits */ 658 toep->flags |= TPF_TX_SUSPENDED; 659 if (sowwakeup) 660 sowwakeup_locked(so); 661 else 662 SOCKBUF_UNLOCK(sb); 663 SOCKBUF_UNLOCK_ASSERT(sb); 664 return; 665 } 666 break; 667 } 668 669 if (max_nsegs_1mbuf < n) 670 max_nsegs_1mbuf = n; 671 sb_sndptr = m; /* new sb->sb_sndptr if all goes well */ 672 673 /* This mbuf put us right at the max_nsegs limit */ 674 if (plen > max_imm && nsegs == max_nsegs) { 675 m = m->m_next; 676 break; 677 } 678 } 679 680 shove = m == NULL && !(tp->t_flags & TF_MORETOCOME); 681 space = sbspace(sb); 682 683 if (space <= sb->sb_hiwat * 3 / 8 && 684 toep->plen_nocompl + plen >= sb->sb_hiwat / 4) 685 compl = 1; 686 else 687 compl = 0; 688 689 if (sb->sb_flags & SB_AUTOSIZE && 690 V_tcp_do_autosndbuf && 691 sb->sb_hiwat < V_tcp_autosndbuf_max && 692 space < sb->sb_hiwat / 8) { 693 int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc, 694 V_tcp_autosndbuf_max); 695 696 if (!sbreserve_locked(sb, newsize, so, NULL)) 697 sb->sb_flags &= ~SB_AUTOSIZE; 698 else 699 sowwakeup = 1; /* room available */ 700 } 701 if (sowwakeup) 702 sowwakeup_locked(so); 703 else 704 SOCKBUF_UNLOCK(sb); 705 SOCKBUF_UNLOCK_ASSERT(sb); 706 707 /* nothing to send */ 708 if (plen == 0) { 709 KASSERT(m == NULL, 710 ("%s: nothing to send, but m != NULL", __func__)); 711 break; 712 } 713 714 if (__predict_false(toep->flags & TPF_FIN_SENT)) 715 panic("%s: excess tx.", __func__); 716 717 if (plen <= max_imm) { 718 719 /* Immediate data tx */ 720 721 wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), 722 toep->ofld_txq); 723 if (wr == NULL) { 724 /* XXX: how will we recover from this? */ 725 toep->flags |= TPF_TX_SUSPENDED; 726 return; 727 } 728 txwr = wrtod(wr); 729 credits = howmany(wr->wr_len, 16); 730 write_tx_wr(txwr, toep, plen, plen, credits, shove, 0, 731 sc->tt.tx_align); 732 m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); 733 nsegs = 0; 734 } else { 735 int wr_len; 736 737 /* DSGL tx */ 738 739 wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + 740 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 741 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq); 742 if (wr == NULL) { 743 /* XXX: how will we recover from this? */ 744 toep->flags |= TPF_TX_SUSPENDED; 745 return; 746 } 747 txwr = wrtod(wr); 748 credits = howmany(wr_len, 16); 749 write_tx_wr(txwr, toep, 0, plen, credits, shove, 0, 750 sc->tt.tx_align); 751 write_tx_sgl(txwr + 1, sndptr, m, nsegs, 752 max_nsegs_1mbuf); 753 if (wr_len & 0xf) { 754 uint64_t *pad = (uint64_t *) 755 ((uintptr_t)txwr + wr_len); 756 *pad = 0; 757 } 758 } 759 760 KASSERT(toep->tx_credits >= credits, 761 ("%s: not enough credits", __func__)); 762 763 toep->tx_credits -= credits; 764 toep->tx_nocompl += credits; 765 toep->plen_nocompl += plen; 766 if (toep->tx_credits <= toep->tx_total * 3 / 8 && 767 toep->tx_nocompl >= toep->tx_total / 4) 768 compl = 1; 769 770 if (compl || toep->ulp_mode == ULP_MODE_RDMA) { 771 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); 772 toep->tx_nocompl = 0; 773 toep->plen_nocompl = 0; 774 } 775 776 tp->snd_nxt += plen; 777 tp->snd_max += plen; 778 779 SOCKBUF_LOCK(sb); 780 KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__)); 781 sb->sb_sndptr = sb_sndptr; 782 SOCKBUF_UNLOCK(sb); 783 784 toep->flags |= TPF_TX_DATA_SENT; 785 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) 786 toep->flags |= TPF_TX_SUSPENDED; 787 788 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 789 txsd->plen = plen; 790 txsd->tx_credits = credits; 791 txsd++; 792 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 793 toep->txsd_pidx = 0; 794 txsd = &toep->txsd[0]; 795 } 796 toep->txsd_avail--; 797 798 t4_l2t_send(sc, wr, toep->l2te); 799 } while (m != NULL); 800 801 /* Send a FIN if requested, but only if there's no more data to send */ 802 if (m == NULL && toep->flags & TPF_SEND_FIN) 803 close_conn(sc, toep); 804 } 805 806 /* Send ULP data over TOE using TX_DATA_WR. We send whole mbuf at once */ 807 void 808 t4_ulp_push_frames(struct adapter *sc, struct toepcb *toep, int drop) 809 { 810 struct mbuf *sndptr, *m = NULL; 811 struct fw_ofld_tx_data_wr *txwr; 812 struct wrqe *wr; 813 unsigned int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf; 814 struct inpcb *inp = toep->inp; 815 struct tcpcb *tp; 816 struct socket *so; 817 struct sockbuf *sb; 818 int tx_credits, ulp_len = 0, ulp_mode = 0, qlen = 0; 819 int shove, compl; 820 struct ofld_tx_sdesc *txsd; 821 822 INP_WLOCK_ASSERT(inp); 823 if (toep->flags & TPF_ABORT_SHUTDOWN) 824 return; 825 826 tp = intotcpcb(inp); 827 so = inp->inp_socket; 828 sb = &so->so_snd; 829 txsd = &toep->txsd[toep->txsd_pidx]; 830 831 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 832 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); 833 834 /* 835 * This function doesn't resume by itself. Someone else must clear the 836 * flag and call this function. 837 */ 838 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) 839 return; 840 841 sndptr = t4_queue_iscsi_callback(so, toep, 1, &qlen); 842 if (!qlen) 843 return; 844 845 do { 846 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); 847 max_imm = max_imm_payload(tx_credits); 848 max_nsegs = max_dsgl_nsegs(tx_credits); 849 850 if (drop) { 851 t4_cpl_iscsi_callback(toep->td, toep, &drop, 852 CPL_FW4_ACK); 853 drop = 0; 854 } 855 856 plen = 0; 857 nsegs = 0; 858 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ 859 for (m = sndptr; m != NULL; m = m->m_next) { 860 int n = sglist_count(mtod(m, void *), m->m_len); 861 862 nsegs += n; 863 plen += m->m_len; 864 865 /* This mbuf sent us _over_ the nsegs limit, return */ 866 if (plen > max_imm && nsegs > max_nsegs) { 867 toep->flags |= TPF_TX_SUSPENDED; 868 return; 869 } 870 871 if (max_nsegs_1mbuf < n) 872 max_nsegs_1mbuf = n; 873 874 /* This mbuf put us right at the max_nsegs limit */ 875 if (plen > max_imm && nsegs == max_nsegs) { 876 toep->flags |= TPF_TX_SUSPENDED; 877 return; 878 } 879 } 880 881 shove = m == NULL && !(tp->t_flags & TF_MORETOCOME); 882 /* nothing to send */ 883 if (plen == 0) { 884 KASSERT(m == NULL, 885 ("%s: nothing to send, but m != NULL", __func__)); 886 break; 887 } 888 889 if (__predict_false(toep->flags & TPF_FIN_SENT)) 890 panic("%s: excess tx.", __func__); 891 892 ulp_len = plen + ulp_extra_len(sndptr, &ulp_mode); 893 if (plen <= max_imm) { 894 895 /* Immediate data tx */ 896 wr = alloc_wrqe(roundup(sizeof(*txwr) + plen, 16), 897 toep->ofld_txq); 898 if (wr == NULL) { 899 /* XXX: how will we recover from this? */ 900 toep->flags |= TPF_TX_SUSPENDED; 901 return; 902 } 903 txwr = wrtod(wr); 904 credits = howmany(wr->wr_len, 16); 905 write_tx_wr(txwr, toep, plen, ulp_len, credits, shove, 906 ulp_mode, 0); 907 m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); 908 } else { 909 int wr_len; 910 911 /* DSGL tx */ 912 wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + 913 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 914 wr = alloc_wrqe(roundup(wr_len, 16), toep->ofld_txq); 915 if (wr == NULL) { 916 /* XXX: how will we recover from this? */ 917 toep->flags |= TPF_TX_SUSPENDED; 918 return; 919 } 920 txwr = wrtod(wr); 921 credits = howmany(wr_len, 16); 922 write_tx_wr(txwr, toep, 0, ulp_len, credits, shove, 923 ulp_mode, 0); 924 write_tx_sgl(txwr + 1, sndptr, m, nsegs, 925 max_nsegs_1mbuf); 926 if (wr_len & 0xf) { 927 uint64_t *pad = (uint64_t *) 928 ((uintptr_t)txwr + wr_len); 929 *pad = 0; 930 } 931 } 932 933 KASSERT(toep->tx_credits >= credits, 934 ("%s: not enough credits", __func__)); 935 936 toep->tx_credits -= credits; 937 toep->tx_nocompl += credits; 938 toep->plen_nocompl += plen; 939 if (toep->tx_credits <= toep->tx_total * 3 / 8 && 940 toep->tx_nocompl >= toep->tx_total / 4) 941 compl = 1; 942 943 if (compl) { 944 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); 945 toep->tx_nocompl = 0; 946 toep->plen_nocompl = 0; 947 } 948 tp->snd_nxt += ulp_len; 949 tp->snd_max += ulp_len; 950 951 /* goto next mbuf */ 952 sndptr = m = t4_queue_iscsi_callback(so, toep, 2, &qlen); 953 954 toep->flags |= TPF_TX_DATA_SENT; 955 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) { 956 toep->flags |= TPF_TX_SUSPENDED; 957 } 958 959 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 960 txsd->plen = plen; 961 txsd->tx_credits = credits; 962 txsd++; 963 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 964 toep->txsd_pidx = 0; 965 txsd = &toep->txsd[0]; 966 } 967 toep->txsd_avail--; 968 969 t4_l2t_send(sc, wr, toep->l2te); 970 } while (m != NULL); 971 972 /* Send a FIN if requested, but only if there's no more data to send */ 973 if (m == NULL && toep->flags & TPF_SEND_FIN) 974 close_conn(sc, toep); 975 } 976 977 int 978 t4_tod_output(struct toedev *tod, struct tcpcb *tp) 979 { 980 struct adapter *sc = tod->tod_softc; 981 #ifdef INVARIANTS 982 struct inpcb *inp = tp->t_inpcb; 983 #endif 984 struct toepcb *toep = tp->t_toe; 985 986 INP_WLOCK_ASSERT(inp); 987 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 988 ("%s: inp %p dropped.", __func__, inp)); 989 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 990 991 t4_push_frames(sc, toep, 0); 992 993 return (0); 994 } 995 996 int 997 t4_send_fin(struct toedev *tod, struct tcpcb *tp) 998 { 999 struct adapter *sc = tod->tod_softc; 1000 #ifdef INVARIANTS 1001 struct inpcb *inp = tp->t_inpcb; 1002 #endif 1003 struct toepcb *toep = tp->t_toe; 1004 1005 INP_WLOCK_ASSERT(inp); 1006 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1007 ("%s: inp %p dropped.", __func__, inp)); 1008 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1009 1010 toep->flags |= TPF_SEND_FIN; 1011 if (tp->t_state >= TCPS_ESTABLISHED) { 1012 if (toep->ulp_mode == ULP_MODE_ISCSI) 1013 t4_ulp_push_frames(sc, toep, 0); 1014 else 1015 t4_push_frames(sc, toep, 0); 1016 } 1017 1018 return (0); 1019 } 1020 1021 int 1022 t4_send_rst(struct toedev *tod, struct tcpcb *tp) 1023 { 1024 struct adapter *sc = tod->tod_softc; 1025 #if defined(INVARIANTS) 1026 struct inpcb *inp = tp->t_inpcb; 1027 #endif 1028 struct toepcb *toep = tp->t_toe; 1029 1030 INP_WLOCK_ASSERT(inp); 1031 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1032 ("%s: inp %p dropped.", __func__, inp)); 1033 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1034 1035 /* hmmmm */ 1036 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 1037 ("%s: flowc for tid %u [%s] not sent already", 1038 __func__, toep->tid, tcpstates[tp->t_state])); 1039 1040 send_reset(sc, toep, 0); 1041 return (0); 1042 } 1043 1044 /* 1045 * Peer has sent us a FIN. 1046 */ 1047 static int 1048 do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1049 { 1050 struct adapter *sc = iq->adapter; 1051 const struct cpl_peer_close *cpl = (const void *)(rss + 1); 1052 unsigned int tid = GET_TID(cpl); 1053 struct toepcb *toep = lookup_tid(sc, tid); 1054 struct inpcb *inp = toep->inp; 1055 struct tcpcb *tp = NULL; 1056 struct socket *so; 1057 struct sockbuf *sb; 1058 #ifdef INVARIANTS 1059 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1060 #endif 1061 1062 KASSERT(opcode == CPL_PEER_CLOSE, 1063 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1064 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1065 1066 if (__predict_false(toep->flags & TPF_SYNQE)) { 1067 #ifdef INVARIANTS 1068 struct synq_entry *synqe = (void *)toep; 1069 1070 INP_WLOCK(synqe->lctx->inp); 1071 if (synqe->flags & TPF_SYNQE_HAS_L2TE) { 1072 KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, 1073 ("%s: listen socket closed but tid %u not aborted.", 1074 __func__, tid)); 1075 } else { 1076 /* 1077 * do_pass_accept_req is still running and will 1078 * eventually take care of this tid. 1079 */ 1080 } 1081 INP_WUNLOCK(synqe->lctx->inp); 1082 #endif 1083 CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, 1084 toep, toep->flags); 1085 return (0); 1086 } 1087 1088 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1089 1090 INP_INFO_WLOCK(&V_tcbinfo); 1091 INP_WLOCK(inp); 1092 tp = intotcpcb(inp); 1093 1094 CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__, 1095 tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp); 1096 1097 if (toep->flags & TPF_ABORT_SHUTDOWN) 1098 goto done; 1099 1100 tp->rcv_nxt++; /* FIN */ 1101 1102 so = inp->inp_socket; 1103 sb = &so->so_rcv; 1104 SOCKBUF_LOCK(sb); 1105 if (__predict_false(toep->ddp_flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) { 1106 handle_ddp_close(toep, tp, sb, cpl->rcv_nxt); 1107 } 1108 socantrcvmore_locked(so); /* unlocks the sockbuf */ 1109 1110 if (toep->ulp_mode != ULP_MODE_RDMA) { 1111 KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt), 1112 ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt, 1113 be32toh(cpl->rcv_nxt))); 1114 } 1115 1116 switch (tp->t_state) { 1117 case TCPS_SYN_RECEIVED: 1118 tp->t_starttime = ticks; 1119 /* FALLTHROUGH */ 1120 1121 case TCPS_ESTABLISHED: 1122 tp->t_state = TCPS_CLOSE_WAIT; 1123 break; 1124 1125 case TCPS_FIN_WAIT_1: 1126 tp->t_state = TCPS_CLOSING; 1127 break; 1128 1129 case TCPS_FIN_WAIT_2: 1130 tcp_twstart(tp); 1131 INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ 1132 INP_INFO_WUNLOCK(&V_tcbinfo); 1133 1134 INP_WLOCK(inp); 1135 final_cpl_received(toep); 1136 return (0); 1137 1138 default: 1139 log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n", 1140 __func__, tid, tp->t_state); 1141 } 1142 done: 1143 INP_WUNLOCK(inp); 1144 INP_INFO_WUNLOCK(&V_tcbinfo); 1145 return (0); 1146 } 1147 1148 /* 1149 * Peer has ACK'd our FIN. 1150 */ 1151 static int 1152 do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss, 1153 struct mbuf *m) 1154 { 1155 struct adapter *sc = iq->adapter; 1156 const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1); 1157 unsigned int tid = GET_TID(cpl); 1158 struct toepcb *toep = lookup_tid(sc, tid); 1159 struct inpcb *inp = toep->inp; 1160 struct tcpcb *tp = NULL; 1161 struct socket *so = NULL; 1162 #ifdef INVARIANTS 1163 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1164 #endif 1165 1166 KASSERT(opcode == CPL_CLOSE_CON_RPL, 1167 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1168 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1169 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1170 1171 INP_INFO_WLOCK(&V_tcbinfo); 1172 INP_WLOCK(inp); 1173 tp = intotcpcb(inp); 1174 1175 CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x", 1176 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags); 1177 1178 if (toep->flags & TPF_ABORT_SHUTDOWN) 1179 goto done; 1180 1181 so = inp->inp_socket; 1182 tp->snd_una = be32toh(cpl->snd_nxt) - 1; /* exclude FIN */ 1183 1184 switch (tp->t_state) { 1185 case TCPS_CLOSING: /* see TCPS_FIN_WAIT_2 in do_peer_close too */ 1186 tcp_twstart(tp); 1187 release: 1188 INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ 1189 INP_INFO_WUNLOCK(&V_tcbinfo); 1190 1191 INP_WLOCK(inp); 1192 final_cpl_received(toep); /* no more CPLs expected */ 1193 1194 return (0); 1195 case TCPS_LAST_ACK: 1196 if (tcp_close(tp)) 1197 INP_WUNLOCK(inp); 1198 goto release; 1199 1200 case TCPS_FIN_WAIT_1: 1201 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 1202 soisdisconnected(so); 1203 tp->t_state = TCPS_FIN_WAIT_2; 1204 break; 1205 1206 default: 1207 log(LOG_ERR, 1208 "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n", 1209 __func__, tid, tcpstates[tp->t_state]); 1210 } 1211 done: 1212 INP_WUNLOCK(inp); 1213 INP_INFO_WUNLOCK(&V_tcbinfo); 1214 return (0); 1215 } 1216 1217 void 1218 send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid, 1219 int rst_status) 1220 { 1221 struct wrqe *wr; 1222 struct cpl_abort_rpl *cpl; 1223 1224 wr = alloc_wrqe(sizeof(*cpl), ofld_txq); 1225 if (wr == NULL) { 1226 /* XXX */ 1227 panic("%s: allocation failure.", __func__); 1228 } 1229 cpl = wrtod(wr); 1230 1231 INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid); 1232 cpl->cmd = rst_status; 1233 1234 t4_wrq_tx(sc, wr); 1235 } 1236 1237 static int 1238 abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason) 1239 { 1240 switch (abort_reason) { 1241 case CPL_ERR_BAD_SYN: 1242 case CPL_ERR_CONN_RESET: 1243 return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET); 1244 case CPL_ERR_XMIT_TIMEDOUT: 1245 case CPL_ERR_PERSIST_TIMEDOUT: 1246 case CPL_ERR_FINWAIT2_TIMEDOUT: 1247 case CPL_ERR_KEEPALIVE_TIMEDOUT: 1248 return (ETIMEDOUT); 1249 default: 1250 return (EIO); 1251 } 1252 } 1253 1254 int 1255 cpl_not_handled(struct sge_iq *, const struct rss_header *, struct mbuf *); 1256 /* 1257 * tom_cpl_iscsi_callback - 1258 * iscsi and tom would share the following cpl messages, so when any of these 1259 * message is received, after tom is done with processing it, the messages 1260 * needs to be forwarded to iscsi for further processing: 1261 * - CPL_SET_TCB_RPL 1262 * - CPL_RX_DATA_DDP 1263 */ 1264 void (*tom_cpl_iscsi_callback)(struct tom_data *, struct socket *, void *, 1265 unsigned int); 1266 1267 struct mbuf *(*tom_queue_iscsi_callback)(struct socket *, unsigned int, int *); 1268 /* 1269 * Check if the handler function is set for a given CPL 1270 * return 0 if the function is NULL or cpl_not_handled, 1 otherwise. 1271 */ 1272 int 1273 t4tom_cpl_handler_registered(struct adapter *sc, unsigned int opcode) 1274 { 1275 1276 MPASS(opcode < nitems(sc->cpl_handler)); 1277 1278 return (sc->cpl_handler[opcode] && 1279 sc->cpl_handler[opcode] != cpl_not_handled); 1280 } 1281 1282 /* 1283 * set the tom_cpl_iscsi_callback function, this function should be used 1284 * whenever both toe and iscsi need to process the same cpl msg. 1285 */ 1286 void 1287 t4tom_register_cpl_iscsi_callback(void (*fp)(struct tom_data *, struct socket *, 1288 void *, unsigned int)) 1289 { 1290 1291 tom_cpl_iscsi_callback = fp; 1292 } 1293 1294 void 1295 t4tom_register_queue_iscsi_callback(struct mbuf *(*fp)(struct socket *, 1296 unsigned int, int *qlen)) 1297 { 1298 1299 tom_queue_iscsi_callback = fp; 1300 } 1301 1302 int 1303 t4_cpl_iscsi_callback(struct tom_data *td, struct toepcb *toep, void *m, 1304 unsigned int opcode) 1305 { 1306 struct socket *so; 1307 1308 if (opcode == CPL_FW4_ACK) 1309 so = toep->inp->inp_socket; 1310 else { 1311 INP_WLOCK(toep->inp); 1312 so = toep->inp->inp_socket; 1313 INP_WUNLOCK(toep->inp); 1314 } 1315 1316 if (tom_cpl_iscsi_callback && so) { 1317 if (toep->ulp_mode == ULP_MODE_ISCSI) { 1318 tom_cpl_iscsi_callback(td, so, m, opcode); 1319 return (0); 1320 } 1321 } 1322 1323 return (1); 1324 } 1325 1326 struct mbuf * 1327 t4_queue_iscsi_callback(struct socket *so, struct toepcb *toep, 1328 unsigned int cmd, int *qlen) 1329 { 1330 1331 if (tom_queue_iscsi_callback && so) { 1332 if (toep->ulp_mode == ULP_MODE_ISCSI) 1333 return (tom_queue_iscsi_callback(so, cmd, qlen)); 1334 } 1335 1336 return (NULL); 1337 } 1338 1339 /* 1340 * TCP RST from the peer, timeout, or some other such critical error. 1341 */ 1342 static int 1343 do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1344 { 1345 struct adapter *sc = iq->adapter; 1346 const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1); 1347 unsigned int tid = GET_TID(cpl); 1348 struct toepcb *toep = lookup_tid(sc, tid); 1349 struct sge_wrq *ofld_txq = toep->ofld_txq; 1350 struct inpcb *inp; 1351 struct tcpcb *tp; 1352 #ifdef INVARIANTS 1353 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1354 #endif 1355 1356 KASSERT(opcode == CPL_ABORT_REQ_RSS, 1357 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1358 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1359 1360 if (toep->flags & TPF_SYNQE) 1361 return (do_abort_req_synqe(iq, rss, m)); 1362 1363 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1364 1365 if (negative_advice(cpl->status)) { 1366 CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)", 1367 __func__, cpl->status, tid, toep->flags); 1368 return (0); /* Ignore negative advice */ 1369 } 1370 1371 inp = toep->inp; 1372 INP_INFO_WLOCK(&V_tcbinfo); /* for tcp_close */ 1373 INP_WLOCK(inp); 1374 1375 tp = intotcpcb(inp); 1376 1377 CTR6(KTR_CXGBE, 1378 "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d", 1379 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, 1380 inp->inp_flags, cpl->status); 1381 1382 /* 1383 * If we'd initiated an abort earlier the reply to it is responsible for 1384 * cleaning up resources. Otherwise we tear everything down right here 1385 * right now. We owe the T4 a CPL_ABORT_RPL no matter what. 1386 */ 1387 if (toep->flags & TPF_ABORT_SHUTDOWN) { 1388 INP_WUNLOCK(inp); 1389 goto done; 1390 } 1391 toep->flags |= TPF_ABORT_SHUTDOWN; 1392 1393 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 1394 struct socket *so = inp->inp_socket; 1395 1396 if (so != NULL) 1397 so_error_set(so, abort_status_to_errno(tp, 1398 cpl->status)); 1399 tp = tcp_close(tp); 1400 if (tp == NULL) 1401 INP_WLOCK(inp); /* re-acquire */ 1402 } 1403 1404 final_cpl_received(toep); 1405 done: 1406 INP_INFO_WUNLOCK(&V_tcbinfo); 1407 send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST); 1408 return (0); 1409 } 1410 1411 /* 1412 * Reply to the CPL_ABORT_REQ (send_reset) 1413 */ 1414 static int 1415 do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1416 { 1417 struct adapter *sc = iq->adapter; 1418 const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1); 1419 unsigned int tid = GET_TID(cpl); 1420 struct toepcb *toep = lookup_tid(sc, tid); 1421 struct inpcb *inp = toep->inp; 1422 #ifdef INVARIANTS 1423 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1424 #endif 1425 1426 KASSERT(opcode == CPL_ABORT_RPL_RSS, 1427 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1428 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1429 1430 if (toep->flags & TPF_SYNQE) 1431 return (do_abort_rpl_synqe(iq, rss, m)); 1432 1433 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1434 1435 CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d", 1436 __func__, tid, toep, inp, cpl->status); 1437 1438 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, 1439 ("%s: wasn't expecting abort reply", __func__)); 1440 1441 INP_WLOCK(inp); 1442 final_cpl_received(toep); 1443 1444 return (0); 1445 } 1446 1447 static int 1448 do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1449 { 1450 struct adapter *sc = iq->adapter; 1451 const struct cpl_rx_data *cpl = mtod(m, const void *); 1452 unsigned int tid = GET_TID(cpl); 1453 struct toepcb *toep = lookup_tid(sc, tid); 1454 struct inpcb *inp = toep->inp; 1455 struct tcpcb *tp; 1456 struct socket *so; 1457 struct sockbuf *sb; 1458 int len; 1459 uint32_t ddp_placed = 0; 1460 1461 if (__predict_false(toep->flags & TPF_SYNQE)) { 1462 #ifdef INVARIANTS 1463 struct synq_entry *synqe = (void *)toep; 1464 1465 INP_WLOCK(synqe->lctx->inp); 1466 if (synqe->flags & TPF_SYNQE_HAS_L2TE) { 1467 KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, 1468 ("%s: listen socket closed but tid %u not aborted.", 1469 __func__, tid)); 1470 } else { 1471 /* 1472 * do_pass_accept_req is still running and will 1473 * eventually take care of this tid. 1474 */ 1475 } 1476 INP_WUNLOCK(synqe->lctx->inp); 1477 #endif 1478 CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, 1479 toep, toep->flags); 1480 m_freem(m); 1481 return (0); 1482 } 1483 1484 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1485 1486 /* strip off CPL header */ 1487 m_adj(m, sizeof(*cpl)); 1488 len = m->m_pkthdr.len; 1489 1490 INP_WLOCK(inp); 1491 if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) { 1492 CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x", 1493 __func__, tid, len, inp->inp_flags); 1494 INP_WUNLOCK(inp); 1495 m_freem(m); 1496 return (0); 1497 } 1498 1499 tp = intotcpcb(inp); 1500 1501 if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq))) 1502 ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt; 1503 1504 tp->rcv_nxt += len; 1505 KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__)); 1506 tp->rcv_wnd -= len; 1507 tp->t_rcvtime = ticks; 1508 1509 so = inp_inpcbtosocket(inp); 1510 sb = &so->so_rcv; 1511 SOCKBUF_LOCK(sb); 1512 1513 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) { 1514 CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)", 1515 __func__, tid, len); 1516 m_freem(m); 1517 SOCKBUF_UNLOCK(sb); 1518 INP_WUNLOCK(inp); 1519 1520 INP_INFO_WLOCK(&V_tcbinfo); 1521 INP_WLOCK(inp); 1522 tp = tcp_drop(tp, ECONNRESET); 1523 if (tp) 1524 INP_WUNLOCK(inp); 1525 INP_INFO_WUNLOCK(&V_tcbinfo); 1526 1527 return (0); 1528 } 1529 1530 /* receive buffer autosize */ 1531 if (sb->sb_flags & SB_AUTOSIZE && 1532 V_tcp_do_autorcvbuf && 1533 sb->sb_hiwat < V_tcp_autorcvbuf_max && 1534 len > (sbspace(sb) / 8 * 7)) { 1535 unsigned int hiwat = sb->sb_hiwat; 1536 unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc, 1537 V_tcp_autorcvbuf_max); 1538 1539 if (!sbreserve_locked(sb, newsize, so, NULL)) 1540 sb->sb_flags &= ~SB_AUTOSIZE; 1541 else 1542 toep->rx_credits += newsize - hiwat; 1543 } 1544 1545 if (toep->ulp_mode == ULP_MODE_TCPDDP) { 1546 int changed = !(toep->ddp_flags & DDP_ON) ^ cpl->ddp_off; 1547 1548 if (changed) { 1549 if (toep->ddp_flags & DDP_SC_REQ) 1550 toep->ddp_flags ^= DDP_ON | DDP_SC_REQ; 1551 else { 1552 KASSERT(cpl->ddp_off == 1, 1553 ("%s: DDP switched on by itself.", 1554 __func__)); 1555 1556 /* Fell out of DDP mode */ 1557 toep->ddp_flags &= ~(DDP_ON | DDP_BUF0_ACTIVE | 1558 DDP_BUF1_ACTIVE); 1559 1560 if (ddp_placed) 1561 insert_ddp_data(toep, ddp_placed); 1562 } 1563 } 1564 1565 if ((toep->ddp_flags & DDP_OK) == 0 && 1566 time_uptime >= toep->ddp_disabled + DDP_RETRY_WAIT) { 1567 toep->ddp_score = DDP_LOW_SCORE; 1568 toep->ddp_flags |= DDP_OK; 1569 CTR3(KTR_CXGBE, "%s: tid %u DDP_OK @ %u", 1570 __func__, tid, time_uptime); 1571 } 1572 1573 if (toep->ddp_flags & DDP_ON) { 1574 1575 /* 1576 * CPL_RX_DATA with DDP on can only be an indicate. Ask 1577 * soreceive to post a buffer or disable DDP. The 1578 * payload that arrived in this indicate is appended to 1579 * the socket buffer as usual. 1580 */ 1581 1582 #if 0 1583 CTR5(KTR_CXGBE, 1584 "%s: tid %u (0x%x) DDP indicate (seq 0x%x, len %d)", 1585 __func__, tid, toep->flags, be32toh(cpl->seq), len); 1586 #endif 1587 sb->sb_flags |= SB_DDP_INDICATE; 1588 } else if ((toep->ddp_flags & (DDP_OK|DDP_SC_REQ)) == DDP_OK && 1589 tp->rcv_wnd > DDP_RSVD_WIN && len >= sc->tt.ddp_thres) { 1590 1591 /* 1592 * DDP allowed but isn't on (and a request to switch it 1593 * on isn't pending either), and conditions are ripe for 1594 * it to work. Switch it on. 1595 */ 1596 1597 enable_ddp(sc, toep); 1598 } 1599 } 1600 1601 KASSERT(toep->sb_cc >= sbused(sb), 1602 ("%s: sb %p has more data (%d) than last time (%d).", 1603 __func__, sb, sbused(sb), toep->sb_cc)); 1604 toep->rx_credits += toep->sb_cc - sbused(sb); 1605 sbappendstream_locked(sb, m, 0); 1606 toep->sb_cc = sbused(sb); 1607 if (toep->rx_credits > 0 && toep->sb_cc + tp->rcv_wnd < sb->sb_lowat) { 1608 int credits; 1609 1610 credits = send_rx_credits(sc, toep, toep->rx_credits); 1611 toep->rx_credits -= credits; 1612 tp->rcv_wnd += credits; 1613 tp->rcv_adv += credits; 1614 } 1615 sorwakeup_locked(so); 1616 SOCKBUF_UNLOCK_ASSERT(sb); 1617 1618 INP_WUNLOCK(inp); 1619 return (0); 1620 } 1621 1622 #define S_CPL_FW4_ACK_OPCODE 24 1623 #define M_CPL_FW4_ACK_OPCODE 0xff 1624 #define V_CPL_FW4_ACK_OPCODE(x) ((x) << S_CPL_FW4_ACK_OPCODE) 1625 #define G_CPL_FW4_ACK_OPCODE(x) \ 1626 (((x) >> S_CPL_FW4_ACK_OPCODE) & M_CPL_FW4_ACK_OPCODE) 1627 1628 #define S_CPL_FW4_ACK_FLOWID 0 1629 #define M_CPL_FW4_ACK_FLOWID 0xffffff 1630 #define V_CPL_FW4_ACK_FLOWID(x) ((x) << S_CPL_FW4_ACK_FLOWID) 1631 #define G_CPL_FW4_ACK_FLOWID(x) \ 1632 (((x) >> S_CPL_FW4_ACK_FLOWID) & M_CPL_FW4_ACK_FLOWID) 1633 1634 #define S_CPL_FW4_ACK_CR 24 1635 #define M_CPL_FW4_ACK_CR 0xff 1636 #define V_CPL_FW4_ACK_CR(x) ((x) << S_CPL_FW4_ACK_CR) 1637 #define G_CPL_FW4_ACK_CR(x) (((x) >> S_CPL_FW4_ACK_CR) & M_CPL_FW4_ACK_CR) 1638 1639 #define S_CPL_FW4_ACK_SEQVAL 0 1640 #define M_CPL_FW4_ACK_SEQVAL 0x1 1641 #define V_CPL_FW4_ACK_SEQVAL(x) ((x) << S_CPL_FW4_ACK_SEQVAL) 1642 #define G_CPL_FW4_ACK_SEQVAL(x) \ 1643 (((x) >> S_CPL_FW4_ACK_SEQVAL) & M_CPL_FW4_ACK_SEQVAL) 1644 #define F_CPL_FW4_ACK_SEQVAL V_CPL_FW4_ACK_SEQVAL(1U) 1645 1646 static int 1647 do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1648 { 1649 struct adapter *sc = iq->adapter; 1650 const struct cpl_fw4_ack *cpl = (const void *)(rss + 1); 1651 unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl))); 1652 struct toepcb *toep = lookup_tid(sc, tid); 1653 struct inpcb *inp; 1654 struct tcpcb *tp; 1655 struct socket *so; 1656 uint8_t credits = cpl->credits; 1657 struct ofld_tx_sdesc *txsd; 1658 int plen; 1659 #ifdef INVARIANTS 1660 unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl))); 1661 #endif 1662 1663 /* 1664 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and 1665 * now this comes back carrying the credits for the flowc. 1666 */ 1667 if (__predict_false(toep->flags & TPF_SYNQE)) { 1668 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, 1669 ("%s: credits for a synq entry %p", __func__, toep)); 1670 return (0); 1671 } 1672 1673 inp = toep->inp; 1674 1675 KASSERT(opcode == CPL_FW4_ACK, 1676 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1677 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1678 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1679 1680 INP_WLOCK(inp); 1681 1682 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) { 1683 INP_WUNLOCK(inp); 1684 return (0); 1685 } 1686 1687 KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0, 1688 ("%s: inp_flags 0x%x", __func__, inp->inp_flags)); 1689 1690 tp = intotcpcb(inp); 1691 1692 if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) { 1693 tcp_seq snd_una = be32toh(cpl->snd_una); 1694 1695 #ifdef INVARIANTS 1696 if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) { 1697 log(LOG_ERR, 1698 "%s: unexpected seq# %x for TID %u, snd_una %x\n", 1699 __func__, snd_una, toep->tid, tp->snd_una); 1700 } 1701 #endif 1702 1703 if (tp->snd_una != snd_una) { 1704 tp->snd_una = snd_una; 1705 tp->ts_recent_age = tcp_ts_getticks(); 1706 } 1707 } 1708 1709 so = inp->inp_socket; 1710 txsd = &toep->txsd[toep->txsd_cidx]; 1711 plen = 0; 1712 while (credits) { 1713 KASSERT(credits >= txsd->tx_credits, 1714 ("%s: too many (or partial) credits", __func__)); 1715 credits -= txsd->tx_credits; 1716 toep->tx_credits += txsd->tx_credits; 1717 plen += txsd->plen; 1718 txsd++; 1719 toep->txsd_avail++; 1720 KASSERT(toep->txsd_avail <= toep->txsd_total, 1721 ("%s: txsd avail > total", __func__)); 1722 if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) { 1723 txsd = &toep->txsd[0]; 1724 toep->txsd_cidx = 0; 1725 } 1726 } 1727 1728 if (toep->tx_credits == toep->tx_total) { 1729 toep->tx_nocompl = 0; 1730 toep->plen_nocompl = 0; 1731 } 1732 1733 if (toep->flags & TPF_TX_SUSPENDED && 1734 toep->tx_credits >= toep->tx_total / 4) { 1735 toep->flags &= ~TPF_TX_SUSPENDED; 1736 if (toep->ulp_mode == ULP_MODE_ISCSI) 1737 t4_ulp_push_frames(sc, toep, plen); 1738 else 1739 t4_push_frames(sc, toep, plen); 1740 } else if (plen > 0) { 1741 struct sockbuf *sb = &so->so_snd; 1742 1743 if (toep->ulp_mode == ULP_MODE_ISCSI) 1744 t4_cpl_iscsi_callback(toep->td, toep, &plen, 1745 CPL_FW4_ACK); 1746 else { 1747 SOCKBUF_LOCK(sb); 1748 sbdrop_locked(sb, plen); 1749 sowwakeup_locked(so); 1750 SOCKBUF_UNLOCK_ASSERT(sb); 1751 } 1752 } 1753 1754 INP_WUNLOCK(inp); 1755 1756 return (0); 1757 } 1758 1759 static int 1760 do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1761 { 1762 struct adapter *sc = iq->adapter; 1763 const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1); 1764 unsigned int tid = GET_TID(cpl); 1765 #ifdef INVARIANTS 1766 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1767 #endif 1768 1769 KASSERT(opcode == CPL_SET_TCB_RPL, 1770 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1771 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1772 1773 if (is_ftid(sc, tid)) 1774 return (t4_filter_rpl(iq, rss, m)); /* TCB is a filter */ 1775 else { 1776 struct toepcb *toep = lookup_tid(sc, tid); 1777 1778 t4_cpl_iscsi_callback(toep->td, toep, m, CPL_SET_TCB_RPL); 1779 return (0); 1780 } 1781 1782 CXGBE_UNIMPLEMENTED(__func__); 1783 } 1784 1785 void 1786 t4_set_tcb_field(struct adapter *sc, struct toepcb *toep, int ctrl, 1787 uint16_t word, uint64_t mask, uint64_t val) 1788 { 1789 struct wrqe *wr; 1790 struct cpl_set_tcb_field *req; 1791 1792 wr = alloc_wrqe(sizeof(*req), ctrl ? toep->ctrlq : toep->ofld_txq); 1793 if (wr == NULL) { 1794 /* XXX */ 1795 panic("%s: allocation failure.", __func__); 1796 } 1797 req = wrtod(wr); 1798 1799 INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid); 1800 req->reply_ctrl = htobe16(V_NO_REPLY(1) | 1801 V_QUEUENO(toep->ofld_rxq->iq.abs_id)); 1802 req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0)); 1803 req->mask = htobe64(mask); 1804 req->val = htobe64(val); 1805 1806 t4_wrq_tx(sc, wr); 1807 } 1808 1809 void 1810 t4_init_cpl_io_handlers(struct adapter *sc) 1811 { 1812 1813 t4_register_cpl_handler(sc, CPL_PEER_CLOSE, do_peer_close); 1814 t4_register_cpl_handler(sc, CPL_CLOSE_CON_RPL, do_close_con_rpl); 1815 t4_register_cpl_handler(sc, CPL_ABORT_REQ_RSS, do_abort_req); 1816 t4_register_cpl_handler(sc, CPL_ABORT_RPL_RSS, do_abort_rpl); 1817 t4_register_cpl_handler(sc, CPL_RX_DATA, do_rx_data); 1818 t4_register_cpl_handler(sc, CPL_FW4_ACK, do_fw4_ack); 1819 t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, do_set_tcb_rpl); 1820 } 1821 1822 void 1823 t4_uninit_cpl_io_handlers(struct adapter *sc) 1824 { 1825 1826 t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl); 1827 } 1828 #endif 1829