/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sglist.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/toecore.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

VNET_DECLARE(int, tcp_do_autosndbuf);
#define V_tcp_do_autosndbuf VNET(tcp_do_autosndbuf)
VNET_DECLARE(int, tcp_autosndbuf_inc);
#define V_tcp_autosndbuf_inc VNET(tcp_autosndbuf_inc)
VNET_DECLARE(int, tcp_autosndbuf_max);
#define V_tcp_autosndbuf_max VNET(tcp_autosndbuf_max)
VNET_DECLARE(int, tcp_do_autorcvbuf);
#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
VNET_DECLARE(int, tcp_autorcvbuf_inc);
#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
VNET_DECLARE(int, tcp_autorcvbuf_max);
#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)

/*
 * For ULP connections HW may add headers, e.g., for digests, that aren't part
 * of the messages sent by the host but that are part of the TCP payload and
 * therefore consume TCP sequence space.  Tx connection parameters that
 * operate in TCP sequence space are affected by the HW additions and need to
 * compensate for them to accurately track TCP sequence numbers.  This array
 * contains the compensating extra lengths for ULP packets.  It is indexed by
 * a packet's ULP submode.
 */
const unsigned int t4_ulp_extra_len[] = {0, 4, 4, 8};
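/*
 * Illustrative reading of the table above (an interpretation consistent with
 * the iSCSI digests mentioned in the comment, not taken from a spec): if bit
 * 0 of the submode selects a 4-byte header digest and bit 1 a 4-byte data
 * digest, then t4_ulp_extra_len[submode & 3] yields 0, 4, 4, or 8 bytes of
 * HW-inserted payload to account for in the sequence space.
 */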

/*
 * Return the length of any HW additions that will be made to a Tx packet.
 * Such additions can happen for some types of ULP packets.
 */
static inline unsigned int
ulp_extra_len(struct mbuf *m, int *ulp_mode)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, CXGBE_ISCSI_MBUF_TAG, NULL)) == NULL)
		return (0);
	*ulp_mode = *((int *)(mtag + 1));

	return (t4_ulp_extra_len[*ulp_mode & 3]);
}

void
send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
{
	struct wrqe *wr;
	struct fw_flowc_wr *flowc;
	unsigned int nparams = ftxp ? 8 : 6, flowclen;
	struct port_info *pi = toep->port;
	struct adapter *sc = pi->adapter;
	unsigned int pfvf = G_FW_VIID_PFN(pi->viid) << S_FW_VIID_PFN;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT),
	    ("%s: flowc for tid %u sent already", __func__, toep->tid));

	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);

	wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	flowc = wrtod(wr);
	memset(flowc, 0, wr->wr_len);

	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(nparams));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
	    V_FW_WR_FLOWID(toep->tid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htobe32(pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htobe32(pi->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htobe32(pi->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htobe32(toep->ofld_rxq->iq.abs_id);
	if (ftxp) {
		uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf);

		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
		flowc->mnemval[4].val = htobe32(ftxp->snd_nxt);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
		flowc->mnemval[5].val = htobe32(ftxp->rcv_nxt);
		flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[6].val = htobe32(sndbuf);
		flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[7].val = htobe32(ftxp->mss);

		CTR6(KTR_CXGBE,
		    "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x",
		    __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt,
		    ftxp->rcv_nxt);
	} else {
		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[4].val = htobe32(512);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[5].val = htobe32(512);

		CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);
	}

	txsd->tx_credits = howmany(flowclen, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	toep->flags |= TPF_FLOWC_WR_SENT;
	t4_wrq_tx(sc, wr);
}
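
/*
 * Sizing sketch for the flowc above (illustrative, assuming the usual 8-byte
 * fw_flowc_wr header and 8-byte fw_flowc_mnemval of the firmware interface):
 * with nparams = 8 the flowc is 8 + 8 * 8 = 72 bytes, the wrqe is rounded up
 * to 80 bytes, and howmany(72, 16) = 5 tx credits are charged to the tid via
 * the txsd entry.
 */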

void
send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt)
{
	struct wrqe *wr;
	struct cpl_abort_req *req;
	int tid = toep->tid;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);	/* don't use if INP_DROPPED */

	INP_WLOCK_ASSERT(inp);

	CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s",
	    __func__, toep->tid,
	    inp->inp_flags & INP_DROPPED ? "inp dropped" :
	    tcpstates[tp->t_state],
	    toep->flags, inp->inp_flags,
	    toep->flags & TPF_ABORT_SHUTDOWN ?
	    " (abort already in progress)" : "");

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		return;			/* abort already in progress */

	toep->flags |= TPF_ABORT_SHUTDOWN;

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %d.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid);
	if (inp->inp_flags & INP_DROPPED)
		req->rsvd0 = htobe32(snd_nxt);
	else
		req->rsvd0 = htobe32(tp->snd_nxt);
	req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	/*
	 * XXX: What's the correct way to tell that the inp hasn't been detached
	 * from its socket?  Should I even be flushing the snd buffer here?
	 */
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)	/* because I'm not sure.  See comment above */
			sbflush(&so->so_snd);
	}

	t4_l2t_send(sc, wr, toep->l2te);
}

/*
 * Called when a connection is established to translate the TCP options
 * reported by HW to FreeBSD's native format.
 */
static void
assign_rxopt(struct tcpcb *tp, unsigned int opt)
{
	struct toepcb *toep = tp->t_toe;
	struct inpcb *inp = tp->t_inpcb;
	struct adapter *sc = td_adapter(toep->td);
	int n;

	INP_LOCK_ASSERT(inp);

	if (inp->inp_inc.inc_flags & INC_ISIPV6)
		n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		n = sizeof(struct ip) + sizeof(struct tcphdr);
	tp->t_maxseg = tp->t_maxopd = sc->params.mtus[G_TCPOPT_MSS(opt)] - n;

	CTR4(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u)", __func__, toep->tid,
	    G_TCPOPT_MSS(opt), sc->params.mtus[G_TCPOPT_MSS(opt)]);

	if (G_TCPOPT_TSTAMP(opt)) {
		tp->t_flags |= TF_RCVD_TSTMP;	/* timestamps ok */
		tp->ts_recent = 0;		/* hmmm */
		tp->ts_recent_age = tcp_ts_getticks();
		tp->t_maxseg -= TCPOLEN_TSTAMP_APPA;
	}

	if (G_TCPOPT_SACK(opt))
		tp->t_flags |= TF_SACK_PERMIT;	/* should already be set */
	else
		tp->t_flags &= ~TF_SACK_PERMIT;	/* sack disallowed by peer */

	if (G_TCPOPT_WSCALE_OK(opt))
		tp->t_flags |= TF_RCVD_SCALE;

	/* Doing window scaling? */
	if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
	    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
		tp->rcv_scale = tp->request_r_scale;
		tp->snd_scale = G_TCPOPT_SND_WSCALE(opt);
	}
}
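
/*
 * Worked example of the mtu_idx translation above (illustrative numbers): if
 * the HW MTU table entry is 1500, an IPv4 connection gets t_maxseg =
 * 1500 - (20 + 20) = 1460, and 12 more bytes (TCPOLEN_TSTAMP_APPA) come off
 * that if the timestamp option was negotiated.
 */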

/*
 * Completes some final bits of initialization for just established connections
 * and changes their state to TCPS_ESTABLISHED.
 *
 * The ISNs are from after the exchange of SYNs.  i.e., the true ISN + 1.
 */
void
make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn,
    uint16_t opt)
{
	struct inpcb *inp = toep->inp;
	struct socket *so = inp->inp_socket;
	struct tcpcb *tp = intotcpcb(inp);
	long bufsize;
	uint32_t iss = be32toh(snd_isn) - 1;	/* true ISS */
	uint32_t irs = be32toh(rcv_isn) - 1;	/* true IRS */
	uint16_t tcpopt = be16toh(opt);
	struct flowc_tx_params ftxp;

	INP_WLOCK_ASSERT(inp);
	KASSERT(tp->t_state == TCPS_SYN_SENT ||
	    tp->t_state == TCPS_SYN_RECEIVED,
	    ("%s: TCP state %s", __func__, tcpstates[tp->t_state]));

	CTR4(KTR_CXGBE, "%s: tid %d, toep %p, inp %p",
	    __func__, toep->tid, toep, inp);

	tp->t_state = TCPS_ESTABLISHED;
	tp->t_starttime = ticks;
	TCPSTAT_INC(tcps_connects);

	tp->irs = irs;
	tcp_rcvseqinit(tp);
	tp->rcv_wnd = toep->rx_credits << 10;
	tp->rcv_adv += tp->rcv_wnd;
	tp->last_ack_sent = tp->rcv_nxt;

	/*
	 * If we were unable to send all rx credits via opt0, save the remainder
	 * in rx_credits so that they can be handed over with the next credit
	 * update.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	bufsize = select_rcv_wnd(so);
	SOCKBUF_UNLOCK(&so->so_rcv);
	toep->rx_credits = bufsize - tp->rcv_wnd;

	tp->iss = iss;
	tcp_sendseqinit(tp);
	tp->snd_una = iss + 1;
	tp->snd_nxt = iss + 1;
	tp->snd_max = iss + 1;

	assign_rxopt(tp, tcpopt);

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf)
		bufsize = V_tcp_autosndbuf_max;
	else
		bufsize = sbspace(&so->so_snd);
	SOCKBUF_UNLOCK(&so->so_snd);

	ftxp.snd_nxt = tp->snd_nxt;
	ftxp.rcv_nxt = tp->rcv_nxt;
	ftxp.snd_space = bufsize;
	ftxp.mss = tp->t_maxseg;
	send_flowc_wr(toep, &ftxp);

	soisconnected(so);
}

static int
send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits)
{
	struct wrqe *wr;
	struct cpl_rx_data_ack *req;
	uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	KASSERT(credits >= 0, ("%s: %d credits", __func__, credits));

	wr = alloc_wrqe(sizeof(*req), toep->ctrlq);
	if (wr == NULL)
		return (0);
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid);
	req->credit_dack = htobe32(dack | V_RX_CREDITS(credits));

	t4_wrq_tx(sc, wr);
	return (credits);
}

void
t4_rcvd(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_rcv;
	struct toepcb *toep = tp->t_toe;
	int credits;

	INP_WLOCK_ASSERT(inp);

	SOCKBUF_LOCK(sb);
	KASSERT(toep->sb_cc >= sbused(sb),
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sbused(sb), toep->sb_cc));
	if (toep->ulp_mode == ULP_MODE_ISCSI) {
		toep->rx_credits += toep->sb_cc;
		toep->sb_cc = 0;
	} else {
		toep->rx_credits += toep->sb_cc - sbused(sb);
		toep->sb_cc = sbused(sb);
	}
	credits = toep->rx_credits;
	SOCKBUF_UNLOCK(sb);

	if (credits > 0 &&
	    (credits + 16384 >= tp->rcv_wnd || credits >= 15 * 1024)) {

		credits = send_rx_credits(sc, toep, credits);
		SOCKBUF_LOCK(sb);
		toep->rx_credits -= credits;
		SOCKBUF_UNLOCK(sb);
		tp->rcv_wnd += credits;
		tp->rcv_adv += credits;
	}
}
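
/*
 * A feel for the thresholds in t4_rcvd() above (a sketch of the intent, read
 * off the condition): rx credits are returned to the chip only once the
 * pending amount is within 16KB of the full rcv_wnd, or is at least 15KB
 * outright, so a stream of small reads doesn't cost one CPL_RX_DATA_ACK per
 * read.
 */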

/*
 * Close a connection by sending a CPL_CLOSE_CON_REQ message.
 */
static int
close_conn(struct adapter *sc, struct toepcb *toep)
{
	struct wrqe *wr;
	struct cpl_close_con_req *req;
	unsigned int tid = toep->tid;

	CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid,
	    toep->flags & TPF_FIN_SENT ? ", IGNORED" : "");

	if (toep->flags & TPF_FIN_SENT)
		return (0);

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) |
	    V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr)));
	req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) |
	    V_FW_WR_FLOWID(tid));
	req->wr.wr_lo = cpu_to_be64(0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	toep->flags |= TPF_FIN_SENT;
	toep->flags &= ~TPF_SEND_FIN;
	t4_l2t_send(sc, wr, toep->l2te);

	return (0);
}

#define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
#define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16))

/* Maximum amount of immediate data we could stuff in a WR */
static inline int
max_imm_payload(int tx_credits)
{
	const int n = 2;	/* Use only up to 2 desc for imm. data WR */

	KASSERT(tx_credits >= 0 &&
	    tx_credits <= MAX_OFLD_TX_CREDITS,
	    ("%s: %d credits", __func__, tx_credits));

	if (tx_credits < MIN_OFLD_TX_CREDITS)
		return (0);

	if (tx_credits >= (n * EQ_ESIZE) / 16)
		return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr));
	else
		return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr));
}

/* Maximum number of SGL entries we could stuff in a WR */
static inline int
max_dsgl_nsegs(int tx_credits)
{
	int nseg = 1;	/* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */
	int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS;

	KASSERT(tx_credits >= 0 &&
	    tx_credits <= MAX_OFLD_TX_CREDITS,
	    ("%s: %d credits", __func__, tx_credits));

	if (tx_credits < MIN_OFLD_TX_CREDITS)
		return (0);

	nseg += 2 * (sge_pair_credits * 16 / 24);
	if ((sge_pair_credits * 16) % 24 == 16)
		nseg++;

	return (nseg);
}
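
/*
 * Worked example for the two helpers above (assuming SGE_MAX_WR_LEN = 512 and
 * EQ_ESIZE = 64, consistent with the "full 512B work request" note further
 * down): MAX_OFLD_TX_CREDITS = 512 / 16 = 32 and MIN_OFLD_TX_CREDITS = 2.
 * With all 32 credits available, max_imm_payload() allows 2 * 64 - 16 = 112
 * bytes of immediate data, and max_dsgl_nsegs() allows 1 + 2 * (30 * 16 / 24)
 * = 41 segments, which matches OFLD_SGL_LEN below.
 */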

static inline void
write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen,
    unsigned int plen, uint8_t credits, int shove, int ulp_mode, int txalign)
{
	struct fw_ofld_tx_data_wr *txwr = dst;
	unsigned int wr_ulp_mode;

	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) |
	    V_FW_WR_IMMDLEN(immdlen));
	txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) |
	    V_FW_WR_LEN16(credits));

	/* for iscsi, the mode & submode setting is per-packet */
	if (toep->ulp_mode == ULP_MODE_ISCSI)
		wr_ulp_mode = V_FW_OFLD_TX_DATA_WR_ULPMODE(ulp_mode >> 4) |
		    V_FW_OFLD_TX_DATA_WR_ULPSUBMODE(ulp_mode & 3);
	else
		wr_ulp_mode = V_FW_OFLD_TX_DATA_WR_ULPMODE(toep->ulp_mode);

	txwr->lsodisable_to_proxy = htobe32(wr_ulp_mode |
	    V_FW_OFLD_TX_DATA_WR_URGENT(0) |	/* XXX */
	    V_FW_OFLD_TX_DATA_WR_SHOVE(shove));
	txwr->plen = htobe32(plen);

	if (txalign > 0) {
		struct tcpcb *tp = intotcpcb(toep->inp);

		if (plen < 2 * tp->t_maxseg || is_10G_port(toep->port))
			txwr->lsodisable_to_proxy |=
			    htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE);
		else
			txwr->lsodisable_to_proxy |=
			    htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD |
				(tp->t_flags & TF_NODELAY ? 0 :
				F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE));
	}
}

/*
 * Generate a DSGL from a starting mbuf.  The total number of segments and the
 * maximum segments in any one mbuf are provided.
 */
static void
write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n)
{
	struct mbuf *m;
	struct ulptx_sgl *usgl = dst;
	int i, j, rc;
	struct sglist sg;
	struct sglist_seg segs[n];

	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

	sglist_init(&sg, n, segs);
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));

	i = -1;
	for (m = start; m != stop; m = m->m_next) {
		rc = sglist_append(&sg, mtod(m, void *), m->m_len);
		if (__predict_false(rc != 0))
			panic("%s: sglist_append %d", __func__, rc);

		for (j = 0; j < sg.sg_nseg; i++, j++) {
			if (i < 0) {
				usgl->len0 = htobe32(segs[j].ss_len);
				usgl->addr0 = htobe64(segs[j].ss_paddr);
			} else {
				usgl->sge[i / 2].len[i & 1] =
				    htobe32(segs[j].ss_len);
				usgl->sge[i / 2].addr[i & 1] =
				    htobe64(segs[j].ss_paddr);
			}
#ifdef INVARIANTS
			nsegs--;
#endif
		}
		sglist_reset(&sg);
	}
	if (i & 1)
		usgl->sge[i / 2].len[1] = htobe32(0);
	KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p",
	    __func__, nsegs, start, stop));
}

/*
 * Max number of SGL entries an offload tx work request can have.  This is 41
 * (1 + 40) for a full 512B work request.
 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40)
 */
#define OFLD_SGL_LEN (41)
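
/*
 * The DSGL work request length used by the pushers below is
 *	sizeof(fw_ofld_tx_data_wr) + sizeof(ulptx_sgl) +
 *	    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8
 * i.e. a 16B header plus a 16B ulptx_sgl (which embeds the first segment)
 * plus 24B per additional pair of segments, with an odd leftover segment
 * padded to 16B.  For example, nsegs = 3 gives 16 + 16 + 24 = 56 bytes,
 * rounded up to a 64-byte wrqe.
 */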

/*
 * Send data and/or a FIN to the peer.
 *
 * The socket's so_snd buffer consists of a stream of data starting with sb_mb
 * and linked together with m_next.  sb_sndptr, if set, is the last mbuf that
 * was transmitted.
 *
 * drop indicates the number of bytes that should be dropped from the head of
 * the send buffer.  It is an optimization that lets do_fw4_ack avoid creating
 * contention on the send buffer lock (before this change it used to do
 * sowwakeup and then t4_push_frames right after that when recovering from tx
 * stalls).  When drop is set this function MUST drop the bytes and wake up any
 * writers.
 */
void
t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m, *sb_sndptr;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_snd;
	int tx_credits, shove, compl, space, sowwakeup;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
	    toep->ulp_mode == ULP_MODE_TCPDDP ||
	    toep->ulp_mode == ULP_MODE_RDMA,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	do {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		SOCKBUF_LOCK(sb);
		sowwakeup = drop;
		if (drop) {
			sbdrop_locked(sb, drop);
			drop = 0;
		}
		sb_sndptr = sb->sb_sndptr;
		sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb;
		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/* This mbuf sent us _over_ the nsegs limit, back out */
			if (plen > max_imm && nsegs > max_nsegs) {
				nsegs -= n;
				plen -= m->m_len;
				if (plen == 0) {
					/* Too few credits */
					toep->flags |= TPF_TX_SUSPENDED;
					if (sowwakeup)
						sowwakeup_locked(so);
					else
						SOCKBUF_UNLOCK(sb);
					SOCKBUF_UNLOCK_ASSERT(sb);
					return;
				}
				break;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
			sb_sndptr = m;	/* new sb->sb_sndptr if all goes well */

			/* This mbuf put us right at the max_nsegs limit */
			if (plen > max_imm && nsegs == max_nsegs) {
				m = m->m_next;
				break;
			}
		}

		shove = m == NULL && !(tp->t_flags & TF_MORETOCOME);
		space = sbspace(sb);

		if (space <= sb->sb_hiwat * 3 / 8 &&
		    toep->plen_nocompl + plen >= sb->sb_hiwat / 4)
			compl = 1;
		else
			compl = 0;

		if (sb->sb_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf &&
		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
		    space < sb->sb_hiwat / 8) {
			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max);

			if (!sbreserve_locked(sb, newsize, so, NULL))
				sb->sb_flags &= ~SB_AUTOSIZE;
			else
				sowwakeup = 1;	/* room available */
		}
		if (sowwakeup)
			sowwakeup_locked(so);
		else
			SOCKBUF_UNLOCK(sb);
		SOCKBUF_UNLOCK_ASSERT(sb);

		/* nothing to send */
		if (plen == 0) {
			KASSERT(m == NULL,
			    ("%s: nothing to send, but m != NULL", __func__));
			break;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		if (plen <= max_imm) {

			/* Immediate data tx */

			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
			    toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, plen, credits, shove, 0,
			    sc->tt.tx_align);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
			nsegs = 0;
		} else {
			int wr_len;

			/* DSGL tx */

			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, plen, credits, shove, 0,
			    sc->tt.tx_align);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));

		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4)
			compl = 1;

		if (compl || toep->ulp_mode == ULP_MODE_RDMA) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}

		tp->snd_nxt += plen;
		tp->snd_max += plen;

		SOCKBUF_LOCK(sb);
		KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__));
		sb->sb_sndptr = sb_sndptr;
		SOCKBUF_UNLOCK(sb);

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	} while (m != NULL);

	/* Send a FIN if requested, but only if there's no more data to send */
	if (m == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}
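
/*
 * A rough reading of the completion heuristics in t4_push_frames() above
 * (interpretive, not from any spec): F_FW_WR_COMPL is requested either when
 * free sockbuf space has fallen to 3/8 of sb_hiwat with at least sb_hiwat/4
 * bytes sent since the last completion, or when the remaining tx credits are
 * at most 3/8 of tx_total with at least tx_total/4 of them outstanding.
 * Either way, credits and sockbuf space are reclaimed in do_fw4_ack roughly
 * once per quarter window rather than per work request.
 */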

/* Send ULP data over TOE using TX_DATA_WR.  We send whole mbuf at once */
void
t4_ulp_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m = NULL;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	unsigned int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	int tx_credits, ulp_len = 0, ulp_mode = 0, qlen = 0;
	int shove, compl;
	struct ofld_tx_sdesc *txsd;

	INP_WLOCK_ASSERT(inp);
	if (toep->flags & TPF_ABORT_SHUTDOWN)
		return;

	tp = intotcpcb(inp);
	so = inp->inp_socket;
	sb = &so->so_snd;
	txsd = &toep->txsd[toep->txsd_pidx];

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED))
		return;

	sndptr = t4_queue_iscsi_callback(so, toep, 1, &qlen);
	if (!qlen)
		return;

	do {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);
		compl = 0;	/* recomputed for each work request */

		if (drop) {
			t4_cpl_iscsi_callback(toep->td, toep, &drop,
			    CPL_FW4_ACK);
			drop = 0;
		}

		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/* This mbuf sent us _over_ the nsegs limit, return */
			if (plen > max_imm && nsegs > max_nsegs) {
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;

			/* This mbuf put us right at the max_nsegs limit */
			if (plen > max_imm && nsegs == max_nsegs) {
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
		}

		shove = m == NULL && !(tp->t_flags & TF_MORETOCOME);
		/* nothing to send */
		if (plen == 0) {
			KASSERT(m == NULL,
			    ("%s: nothing to send, but m != NULL", __func__));
			break;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		ulp_len = plen + ulp_extra_len(sndptr, &ulp_mode);
		if (plen <= max_imm) {

			/* Immediate data tx */
			wr = alloc_wrqe(roundup(sizeof(*txwr) + plen, 16),
			    toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, ulp_len, credits, shove,
			    ulp_mode, 0);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
		} else {
			int wr_len;

			/* DSGL tx */
			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, ulp_len, credits, shove,
			    ulp_mode, 0);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));

		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4)
			compl = 1;

		if (compl) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}
		tp->snd_nxt += ulp_len;
		tp->snd_max += ulp_len;

		/* goto next mbuf */
		sndptr = m = t4_queue_iscsi_callback(so, toep, 2, &qlen);

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	} while (m != NULL);

	/* Send a FIN if requested, but only if there's no more data to send */
	if (m == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}

int
t4_tod_output(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	t4_push_frames(sc, toep, 0);

	return (0);
}

int
t4_send_fin(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	toep->flags |= TPF_SEND_FIN;
	if (tp->t_state >= TCPS_ESTABLISHED) {
		if (toep->ulp_mode == ULP_MODE_ISCSI)
			t4_ulp_push_frames(sc, toep, 0);
		else
			t4_push_frames(sc, toep, 0);
	}

	return (0);
}

int
t4_send_rst(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#if defined(INVARIANTS)
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	/* hmmmm */
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc for tid %u [%s] not sent already",
	    __func__, toep->tid, tcpstates[tp->t_state]));

	send_reset(sc, toep, 0);
	return (0);
}

/*
 * Peer has sent us a FIN.
 */
static int
do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_peer_close *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so;
	struct sockbuf *sb;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_PEER_CLOSE,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (__predict_false(toep->flags & TPF_SYNQE)) {
#ifdef INVARIANTS
		struct synq_entry *synqe = (void *)toep;

		INP_WLOCK(synqe->lctx->inp);
		if (synqe->flags & TPF_SYNQE_HAS_L2TE) {
			KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN,
			    ("%s: listen socket closed but tid %u not aborted.",
			    __func__, tid));
		} else {
			/*
			 * do_pass_accept_req is still running and will
			 * eventually take care of this tid.
			 */
		}
		INP_WUNLOCK(synqe->lctx->inp);
#endif
		CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
		    toep, toep->flags);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_INFO_WLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__,
	    tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	tp->rcv_nxt++;	/* FIN */

	so = inp->inp_socket;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	if (__predict_false(toep->ddp_flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) {
		handle_ddp_close(toep, tp, sb, cpl->rcv_nxt);
	}
	socantrcvmore_locked(so);	/* unlocks the sockbuf */

	if (toep->ulp_mode != ULP_MODE_RDMA) {
		KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt),
		    ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt,
		    be32toh(cpl->rcv_nxt)));
	}

	switch (tp->t_state) {
	case TCPS_SYN_RECEIVED:
		tp->t_starttime = ticks;
		/* FALLTHROUGH */

	case TCPS_ESTABLISHED:
		tp->t_state = TCPS_CLOSE_WAIT;
		break;

	case TCPS_FIN_WAIT_1:
		tp->t_state = TCPS_CLOSING;
		break;

	case TCPS_FIN_WAIT_2:
		tcp_twstart(tp);
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the inp */
		INP_INFO_WUNLOCK(&V_tcbinfo);

		INP_WLOCK(inp);
		final_cpl_received(toep);
		return (0);

	default:
		log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n",
		    __func__, tid, tp->t_state);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_tcbinfo);
	return (0);
}
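
/*
 * For orientation, the standard TCP teardown transitions driven by the
 * handler above and the one below: a peer FIN moves ESTABLISHED ->
 * CLOSE_WAIT, FIN_WAIT_1 -> CLOSING and FIN_WAIT_2 -> TIME_WAIT, while the
 * ACK of our FIN moves FIN_WAIT_1 -> FIN_WAIT_2, CLOSING -> TIME_WAIT and
 * LAST_ACK -> CLOSED.
 */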

/*
 * Peer has ACK'd our FIN.
 */
static int
do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_CLOSE_CON_RPL,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_INFO_WLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	so = inp->inp_socket;
	tp->snd_una = be32toh(cpl->snd_nxt) - 1;	/* exclude FIN */

	switch (tp->t_state) {
	case TCPS_CLOSING:	/* see TCPS_FIN_WAIT_2 in do_peer_close too */
		tcp_twstart(tp);
release:
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the inp */
		INP_INFO_WUNLOCK(&V_tcbinfo);

		INP_WLOCK(inp);
		final_cpl_received(toep);	/* no more CPLs expected */

		return (0);
	case TCPS_LAST_ACK:
		if (tcp_close(tp))
			INP_WUNLOCK(inp);
		goto release;

	case TCPS_FIN_WAIT_1:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			soisdisconnected(so);
		tp->t_state = TCPS_FIN_WAIT_2;
		break;

	default:
		log(LOG_ERR,
		    "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n",
		    __func__, tid, tcpstates[tp->t_state]);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_tcbinfo);
	return (0);
}

void
send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid,
    int rst_status)
{
	struct wrqe *wr;
	struct cpl_abort_rpl *cpl;

	wr = alloc_wrqe(sizeof(*cpl), ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	cpl = wrtod(wr);

	INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid);
	cpl->cmd = rst_status;

	t4_wrq_tx(sc, wr);
}

static int
abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN:
	case CPL_ERR_CONN_RESET:
		return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET);
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return (ETIMEDOUT);
	default:
		return (EIO);
	}
}

int cpl_not_handled(struct sge_iq *, const struct rss_header *, struct mbuf *);

/*
 * tom_cpl_iscsi_callback -
 * iSCSI and TOM share the following CPL messages, so when any of them is
 * received, TOM processes it first and then forwards it to iSCSI for further
 * processing:
 * - CPL_SET_TCB_RPL
 * - CPL_RX_DATA_DDP
 */
void (*tom_cpl_iscsi_callback)(struct tom_data *, struct socket *, void *,
    unsigned int);

struct mbuf *(*tom_queue_iscsi_callback)(struct socket *, unsigned int, int *);

/*
 * Check if the handler function is set for a given CPL.
 * Return 0 if the function is NULL or cpl_not_handled, 1 otherwise.
 */
int
t4tom_cpl_handler_registered(struct adapter *sc, unsigned int opcode)
{

	MPASS(opcode < nitems(sc->cpl_handler));

	return (sc->cpl_handler[opcode] &&
	    sc->cpl_handler[opcode] != cpl_not_handled);
}

/*
 * Set the tom_cpl_iscsi_callback function.  This function should be used
 * whenever both TOE and iSCSI need to process the same CPL message.
 */
void
t4tom_register_cpl_iscsi_callback(void (*fp)(struct tom_data *, struct socket *,
    void *, unsigned int))
{

	tom_cpl_iscsi_callback = fp;
}

void
t4tom_register_queue_iscsi_callback(struct mbuf *(*fp)(struct socket *,
    unsigned int, int *qlen))
{

	tom_queue_iscsi_callback = fp;
}

int
t4_cpl_iscsi_callback(struct tom_data *td, struct toepcb *toep, void *m,
    unsigned int opcode)
{
	struct socket *so;

	if (opcode == CPL_FW4_ACK)
		so = toep->inp->inp_socket;
	else {
		INP_WLOCK(toep->inp);
		so = toep->inp->inp_socket;
		INP_WUNLOCK(toep->inp);
	}

	if (tom_cpl_iscsi_callback && so) {
		if (toep->ulp_mode == ULP_MODE_ISCSI) {
			tom_cpl_iscsi_callback(td, so, m, opcode);
			return (0);
		}
	}

	return (1);
}

struct mbuf *
t4_queue_iscsi_callback(struct socket *so, struct toepcb *toep,
    unsigned int cmd, int *qlen)
{

	if (tom_queue_iscsi_callback && so) {
		if (toep->ulp_mode == ULP_MODE_ISCSI)
			return (tom_queue_iscsi_callback(so, cmd, qlen));
	}

	return (NULL);
}
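
/*
 * Usage sketch for the registration hooks above (hypothetical iSCSI-side
 * code; my_iscsi_cpl_handler is an illustration, not a function in this
 * tree):
 *
 *	static void
 *	my_iscsi_cpl_handler(struct tom_data *td, struct socket *so, void *m,
 *	    unsigned int opcode)
 *	{
 *		... handle CPL_SET_TCB_RPL / CPL_RX_DATA_DDP / CPL_FW4_ACK ...
 *	}
 *
 *	t4tom_register_cpl_iscsi_callback(my_iscsi_cpl_handler);
 *
 * Once registered, t4_cpl_iscsi_callback() hands the shared CPLs to the
 * iSCSI module for any toepcb whose ulp_mode is ULP_MODE_ISCSI.
 */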

/*
 * TCP RST from the peer, timeout, or some other such critical error.
 */
static int
do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct sge_wrq *ofld_txq = toep->ofld_txq;
	struct inpcb *inp;
	struct tcpcb *tp;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_REQ_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (toep->flags & TPF_SYNQE)
		return (do_abort_req_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	if (negative_advice(cpl->status)) {
		CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)",
		    __func__, cpl->status, tid, toep->flags);
		return (0);	/* Ignore negative advice */
	}

	inp = toep->inp;
	INP_INFO_WLOCK(&V_tcbinfo);	/* for tcp_close */
	INP_WLOCK(inp);

	tp = intotcpcb(inp);

	CTR6(KTR_CXGBE,
	    "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags,
	    inp->inp_flags, cpl->status);

	/*
	 * If we'd initiated an abort earlier the reply to it is responsible for
	 * cleaning up resources.  Otherwise we tear everything down right here
	 * right now.  We owe the T4 a CPL_ABORT_RPL no matter what.
	 */
	if (toep->flags & TPF_ABORT_SHUTDOWN) {
		INP_WUNLOCK(inp);
		goto done;
	}
	toep->flags |= TPF_ABORT_SHUTDOWN;

	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)
			so_error_set(so, abort_status_to_errno(tp,
			    cpl->status));
		tp = tcp_close(tp);
		if (tp == NULL)
			INP_WLOCK(inp);	/* re-acquire */
	}

	final_cpl_received(toep);
done:
	INP_INFO_WUNLOCK(&V_tcbinfo);
	send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST);
	return (0);
}

/*
 * Reply to the CPL_ABORT_REQ (send_reset)
 */
static int
do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_RPL_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (toep->flags & TPF_SYNQE)
		return (do_abort_rpl_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d",
	    __func__, tid, toep, inp, cpl->status);

	KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
	    ("%s: wasn't expecting abort reply", __func__));

	INP_WLOCK(inp);
	final_cpl_received(toep);

	return (0);
}

static int
do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_data *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	int len;
	uint32_t ddp_placed = 0;

	if (__predict_false(toep->flags & TPF_SYNQE)) {
#ifdef INVARIANTS
		struct synq_entry *synqe = (void *)toep;

		INP_WLOCK(synqe->lctx->inp);
		if (synqe->flags & TPF_SYNQE_HAS_L2TE) {
			KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN,
			    ("%s: listen socket closed but tid %u not aborted.",
			    __func__, tid));
		} else {
			/*
			 * do_pass_accept_req is still running and will
			 * eventually take care of this tid.
			 */
		}
		INP_WUNLOCK(synqe->lctx->inp);
#endif
		CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
		    toep, toep->flags);
		m_freem(m);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
	len = m->m_pkthdr.len;

	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	tp = intotcpcb(inp);

	if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq)))
		ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt;

	tp->rcv_nxt += len;
	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
	tp->rcv_wnd -= len;
	tp->t_rcvtime = ticks;

	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
		CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
		    __func__, tid, len);
		m_freem(m);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);

		INP_INFO_WLOCK(&V_tcbinfo);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp)
			INP_WUNLOCK(inp);
		INP_INFO_WUNLOCK(&V_tcbinfo);

		return (0);
	}

	/* receive buffer autosize */
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    len > (sbspace(sb) / 8 * 7)) {
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(sb, newsize, so, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
		else
			toep->rx_credits += newsize - hiwat;
	}

	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		int changed = !(toep->ddp_flags & DDP_ON) ^ cpl->ddp_off;

		if (changed) {
			if (toep->ddp_flags & DDP_SC_REQ)
				toep->ddp_flags ^= DDP_ON | DDP_SC_REQ;
			else {
				KASSERT(cpl->ddp_off == 1,
				    ("%s: DDP switched on by itself.",
				    __func__));

				/* Fell out of DDP mode */
				toep->ddp_flags &= ~(DDP_ON | DDP_BUF0_ACTIVE |
				    DDP_BUF1_ACTIVE);

				if (ddp_placed)
					insert_ddp_data(toep, ddp_placed);
			}
		}

		if ((toep->ddp_flags & DDP_OK) == 0 &&
		    time_uptime >= toep->ddp_disabled + DDP_RETRY_WAIT) {
			toep->ddp_score = DDP_LOW_SCORE;
			toep->ddp_flags |= DDP_OK;
			CTR3(KTR_CXGBE, "%s: tid %u DDP_OK @ %u",
			    __func__, tid, time_uptime);
		}

		if (toep->ddp_flags & DDP_ON) {

			/*
			 * CPL_RX_DATA with DDP on can only be an indicate.  Ask
			 * soreceive to post a buffer or disable DDP.  The
			 * payload that arrived in this indicate is appended to
			 * the socket buffer as usual.
			 */

#if 0
			CTR5(KTR_CXGBE,
			    "%s: tid %u (0x%x) DDP indicate (seq 0x%x, len %d)",
			    __func__, tid, toep->flags, be32toh(cpl->seq), len);
#endif
			sb->sb_flags |= SB_DDP_INDICATE;
		} else if ((toep->ddp_flags & (DDP_OK|DDP_SC_REQ)) == DDP_OK &&
		    tp->rcv_wnd > DDP_RSVD_WIN && len >= sc->tt.ddp_thres) {

			/*
			 * DDP allowed but isn't on (and a request to switch it
			 * on isn't pending either), and conditions are ripe for
			 * it to work.  Switch it on.
			 */

			enable_ddp(sc, toep);
		}
	}

	KASSERT(toep->sb_cc >= sbused(sb),
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sbused(sb), toep->sb_cc));
	toep->rx_credits += toep->sb_cc - sbused(sb);
	sbappendstream_locked(sb, m, 0);
	toep->sb_cc = sbused(sb);
	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);

	INP_WUNLOCK(inp);
	return (0);
}

#define S_CPL_FW4_ACK_OPCODE	24
#define M_CPL_FW4_ACK_OPCODE	0xff
#define V_CPL_FW4_ACK_OPCODE(x)	((x) << S_CPL_FW4_ACK_OPCODE)
#define G_CPL_FW4_ACK_OPCODE(x)	\
    (((x) >> S_CPL_FW4_ACK_OPCODE) & M_CPL_FW4_ACK_OPCODE)

#define S_CPL_FW4_ACK_FLOWID	0
#define M_CPL_FW4_ACK_FLOWID	0xffffff
#define V_CPL_FW4_ACK_FLOWID(x)	((x) << S_CPL_FW4_ACK_FLOWID)
#define G_CPL_FW4_ACK_FLOWID(x)	\
    (((x) >> S_CPL_FW4_ACK_FLOWID) & M_CPL_FW4_ACK_FLOWID)

#define S_CPL_FW4_ACK_CR	24
#define M_CPL_FW4_ACK_CR	0xff
#define V_CPL_FW4_ACK_CR(x)	((x) << S_CPL_FW4_ACK_CR)
#define G_CPL_FW4_ACK_CR(x)	(((x) >> S_CPL_FW4_ACK_CR) & M_CPL_FW4_ACK_CR)

#define S_CPL_FW4_ACK_SEQVAL	0
#define M_CPL_FW4_ACK_SEQVAL	0x1
#define V_CPL_FW4_ACK_SEQVAL(x)	((x) << S_CPL_FW4_ACK_SEQVAL)
#define G_CPL_FW4_ACK_SEQVAL(x)	\
    (((x) >> S_CPL_FW4_ACK_SEQVAL) & M_CPL_FW4_ACK_SEQVAL)
#define F_CPL_FW4_ACK_SEQVAL	V_CPL_FW4_ACK_SEQVAL(1U)
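
/*
 * Layout implied by the macros above: the opcode occupies bits 31:24 of the
 * first CPL word and the flowid (tid) bits 23:0, so
 * G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl))) in do_fw4_ack() below
 * recovers the tid.  For example, a word with 0x05 in its low 24 bits names
 * tid 5 regardless of the opcode byte on top.
 */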

static int
do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp;
	struct tcpcb *tp;
	struct socket *so;
	uint8_t credits = cpl->credits;
	struct ofld_tx_sdesc *txsd;
	int plen;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	/*
	 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and
	 * now this comes back carrying the credits for the flowc.
	 */
	if (__predict_false(toep->flags & TPF_SYNQE)) {
		KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
		    ("%s: credits for a synq entry %p", __func__, toep));
		return (0);
	}

	inp = toep->inp;

	KASSERT(opcode == CPL_FW4_ACK,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_WLOCK(inp);

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) {
		INP_WUNLOCK(inp);
		return (0);
	}

	KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0,
	    ("%s: inp_flags 0x%x", __func__, inp->inp_flags));

	tp = intotcpcb(inp);

	if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) {
		tcp_seq snd_una = be32toh(cpl->snd_una);

#ifdef INVARIANTS
		if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) {
			log(LOG_ERR,
			    "%s: unexpected seq# %x for TID %u, snd_una %x\n",
			    __func__, snd_una, toep->tid, tp->snd_una);
		}
#endif

		if (tp->snd_una != snd_una) {
			tp->snd_una = snd_una;
			tp->ts_recent_age = tcp_ts_getticks();
		}
	}

	so = inp->inp_socket;
	txsd = &toep->txsd[toep->txsd_cidx];
	plen = 0;
	while (credits) {
		KASSERT(credits >= txsd->tx_credits,
		    ("%s: too many (or partial) credits", __func__));
		credits -= txsd->tx_credits;
		toep->tx_credits += txsd->tx_credits;
		plen += txsd->plen;
		txsd++;
		toep->txsd_avail++;
		KASSERT(toep->txsd_avail <= toep->txsd_total,
		    ("%s: txsd avail > total", __func__));
		if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) {
			txsd = &toep->txsd[0];
			toep->txsd_cidx = 0;
		}
	}

	if (toep->tx_credits == toep->tx_total) {
		toep->tx_nocompl = 0;
		toep->plen_nocompl = 0;
	}

	if (toep->flags & TPF_TX_SUSPENDED &&
	    toep->tx_credits >= toep->tx_total / 4) {
		toep->flags &= ~TPF_TX_SUSPENDED;
		if (toep->ulp_mode == ULP_MODE_ISCSI)
			t4_ulp_push_frames(sc, toep, plen);
		else
			t4_push_frames(sc, toep, plen);
	} else if (plen > 0) {
		struct sockbuf *sb = &so->so_snd;

		if (toep->ulp_mode == ULP_MODE_ISCSI)
			t4_cpl_iscsi_callback(toep->td, toep, &plen,
			    CPL_FW4_ACK);
		else {
			SOCKBUF_LOCK(sb);
			sbdrop_locked(sb, plen);
			sowwakeup_locked(so);
			SOCKBUF_UNLOCK_ASSERT(sb);
		}
	}

	INP_WUNLOCK(inp);

	return (0);
}
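
/*
 * Credit-reclaim sketch for the loop in do_fw4_ack() above (illustrative
 * numbers): cpl->credits always covers whole tx descriptors, never part of
 * one.  If the pending txsd entries hold {5, 3, 7} credits and an ack carries
 * 8 credits, the first two entries are retired, toep->tx_credits grows by 8,
 * and plen accumulates their payload lengths for the sbdrop/wakeup path.
 */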

static int
do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_SET_TCB_RPL,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (is_ftid(sc, tid))
		return (t4_filter_rpl(iq, rss, m)); /* TCB is a filter */
	else {
		struct toepcb *toep = lookup_tid(sc, tid);

		t4_cpl_iscsi_callback(toep->td, toep, m, CPL_SET_TCB_RPL);
		return (0);
	}

	CXGBE_UNIMPLEMENTED(__func__);
}

void
t4_set_tcb_field(struct adapter *sc, struct toepcb *toep, int ctrl,
    uint16_t word, uint64_t mask, uint64_t val)
{
	struct wrqe *wr;
	struct cpl_set_tcb_field *req;

	wr = alloc_wrqe(sizeof(*req), ctrl ? toep->ctrlq : toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	t4_wrq_tx(sc, wr);
}

void
t4_init_cpl_io_handlers(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_PEER_CLOSE, do_peer_close);
	t4_register_cpl_handler(sc, CPL_CLOSE_CON_RPL, do_close_con_rpl);
	t4_register_cpl_handler(sc, CPL_ABORT_REQ_RSS, do_abort_req);
	t4_register_cpl_handler(sc, CPL_ABORT_RPL_RSS, do_abort_rpl);
	t4_register_cpl_handler(sc, CPL_RX_DATA, do_rx_data);
	t4_register_cpl_handler(sc, CPL_FW4_ACK, do_fw4_ack);
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, do_set_tcb_rpl);
}

void
t4_uninit_cpl_io_handlers(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
}
#endif