/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sglist.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/toecore.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

VNET_DECLARE(int, tcp_do_autosndbuf);
#define V_tcp_do_autosndbuf VNET(tcp_do_autosndbuf)
VNET_DECLARE(int, tcp_autosndbuf_inc);
#define V_tcp_autosndbuf_inc VNET(tcp_autosndbuf_inc)
VNET_DECLARE(int, tcp_autosndbuf_max);
#define V_tcp_autosndbuf_max VNET(tcp_autosndbuf_max)
VNET_DECLARE(int, tcp_do_autorcvbuf);
#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
VNET_DECLARE(int, tcp_autorcvbuf_inc);
#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
VNET_DECLARE(int, tcp_autorcvbuf_max);
#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)

/*
 * For ULP connections HW may add headers, e.g., for digests, that aren't part
 * of the messages sent by the host but that are part of the TCP payload and
 * therefore consume TCP sequence space.  Tx connection parameters that
 * operate in TCP sequence space are affected by the HW additions and need to
 * compensate for them to accurately track TCP sequence numbers.  This array
 * contains the compensating extra lengths for ULP packets.  It is indexed by
 * a packet's ULP submode.
 */
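/*
 * The low two submode bits select the HW-inserted digests: bit 0 the iSCSI
 * header digest and bit 1 the data digest, each a 4-byte CRC, which is why
 * the extra lengths below are {0, 4, 4, 8} (none, header, data, or both).
 */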
const unsigned int t4_ulp_extra_len[] = {0, 4, 4, 8};

/*
 * Return the length of any HW additions that will be made to a Tx packet.
 * Such additions can happen for some types of ULP packets.
 */
static inline unsigned int
ulp_extra_len(struct mbuf *m, int *ulp_mode)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, CXGBE_ISCSI_MBUF_TAG, NULL)) == NULL)
		return (0);
	*ulp_mode = *((int *)(mtag + 1));

	return (t4_ulp_extra_len[*ulp_mode & 3]);
}

void
send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
{
	struct wrqe *wr;
	struct fw_flowc_wr *flowc;
	unsigned int nparams = ftxp ? 8 : 6, flowclen;
	struct port_info *pi = toep->port;
	struct adapter *sc = pi->adapter;
	unsigned int pfvf = G_FW_VIID_PFN(pi->viid) << S_FW_VIID_PFN;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT),
	    ("%s: flowc for tid %u sent already", __func__, toep->tid));

	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);

	wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	flowc = wrtod(wr);
	memset(flowc, 0, wr->wr_len);

	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(nparams));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
	    V_FW_WR_FLOWID(toep->tid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htobe32(pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htobe32(pi->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htobe32(pi->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htobe32(toep->ofld_rxq->iq.abs_id);
	if (ftxp) {
		uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf);

		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
		flowc->mnemval[4].val = htobe32(ftxp->snd_nxt);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
		flowc->mnemval[5].val = htobe32(ftxp->rcv_nxt);
		flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[6].val = htobe32(sndbuf);
		flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[7].val = htobe32(ftxp->mss);

		CTR6(KTR_CXGBE,
		    "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x",
		    __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt,
		    ftxp->rcv_nxt);
	} else {
		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[4].val = htobe32(512);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[5].val = htobe32(512);

		CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);
	}
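
	/*
	 * Each 16 bytes of work request consumes one tx credit, and every WR
	 * sent gets a software descriptor (txsd) so that do_fw4_ack can
	 * return the credits when the firmware acknowledges the WR.  The
	 * flowc carries no payload, hence plen = 0.
	 */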
	txsd->tx_credits = howmany(flowclen, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	toep->flags |= TPF_FLOWC_WR_SENT;
	t4_wrq_tx(sc, wr);
}

void
send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt)
{
	struct wrqe *wr;
	struct cpl_abort_req *req;
	int tid = toep->tid;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);	/* don't use if INP_DROPPED */

	INP_WLOCK_ASSERT(inp);

	CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s",
	    __func__, toep->tid,
	    inp->inp_flags & INP_DROPPED ? "inp dropped" :
	    tcpstates[tp->t_state],
	    toep->flags, inp->inp_flags,
	    toep->flags & TPF_ABORT_SHUTDOWN ?
	    " (abort already in progress)" : "");

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		return;	/* abort already in progress */

	toep->flags |= TPF_ABORT_SHUTDOWN;

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %d.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid);
	if (inp->inp_flags & INP_DROPPED)
		req->rsvd0 = htobe32(snd_nxt);
	else
		req->rsvd0 = htobe32(tp->snd_nxt);
	req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	/*
	 * XXX: What's the correct way to tell that the inp hasn't been detached
	 * from its socket?  Should I even be flushing the snd buffer here?
	 */
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)	/* because I'm not sure.  See comment above */
			sbflush(&so->so_snd);
	}

	t4_l2t_send(sc, wr, toep->l2te);
}

/*
 * Called when a connection is established to translate the TCP options
 * reported by HW to FreeBSD's native format.
 */
static void
assign_rxopt(struct tcpcb *tp, unsigned int opt)
{
	struct toepcb *toep = tp->t_toe;
	struct inpcb *inp = tp->t_inpcb;
	struct adapter *sc = td_adapter(toep->td);
	int n;

	INP_LOCK_ASSERT(inp);

	if (inp->inp_inc.inc_flags & INC_ISIPV6)
		n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		n = sizeof(struct ip) + sizeof(struct tcphdr);
	tp->t_maxseg = tp->t_maxopd = sc->params.mtus[G_TCPOPT_MSS(opt)] - n;

	CTR4(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u)", __func__, toep->tid,
	    G_TCPOPT_MSS(opt), sc->params.mtus[G_TCPOPT_MSS(opt)]);

	if (G_TCPOPT_TSTAMP(opt)) {
		tp->t_flags |= TF_RCVD_TSTMP;	/* timestamps ok */
		tp->ts_recent = 0;		/* hmmm */
		tp->ts_recent_age = tcp_ts_getticks();
		tp->t_maxseg -= TCPOLEN_TSTAMP_APPA;
	}

	if (G_TCPOPT_SACK(opt))
		tp->t_flags |= TF_SACK_PERMIT;	/* should already be set */
	else
		tp->t_flags &= ~TF_SACK_PERMIT;	/* sack disallowed by peer */

	if (G_TCPOPT_WSCALE_OK(opt))
		tp->t_flags |= TF_RCVD_SCALE;

	/* Doing window scaling? */
	if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
	    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
		tp->rcv_scale = tp->request_r_scale;
		tp->snd_scale = G_TCPOPT_SND_WSCALE(opt);
	}
}

/*
 * Completes some final bits of initialization for just established connections
 * and changes their state to TCPS_ESTABLISHED.
 *
 * The ISNs are from after the exchange of SYNs, i.e., the true ISN + 1.
 */
void
make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn,
    uint16_t opt)
{
	struct inpcb *inp = toep->inp;
	struct socket *so = inp->inp_socket;
	struct tcpcb *tp = intotcpcb(inp);
	long bufsize;
	uint32_t iss = be32toh(snd_isn) - 1;	/* true ISS */
	uint32_t irs = be32toh(rcv_isn) - 1;	/* true IRS */
	uint16_t tcpopt = be16toh(opt);
	struct flowc_tx_params ftxp;

	INP_WLOCK_ASSERT(inp);
	KASSERT(tp->t_state == TCPS_SYN_SENT ||
	    tp->t_state == TCPS_SYN_RECEIVED,
	    ("%s: TCP state %s", __func__, tcpstates[tp->t_state]));

	CTR4(KTR_CXGBE, "%s: tid %d, toep %p, inp %p",
	    __func__, toep->tid, toep, inp);

	tp->t_state = TCPS_ESTABLISHED;
	tp->t_starttime = ticks;
	TCPSTAT_INC(tcps_connects);

	tp->irs = irs;
	tcp_rcvseqinit(tp);
	tp->rcv_wnd = toep->rx_credits << 10;
	tp->rcv_adv += tp->rcv_wnd;
	tp->last_ack_sent = tp->rcv_nxt;

	/*
	 * If we were unable to send all rx credits via opt0, save the remainder
	 * in rx_credits so that they can be handed over with the next credit
	 * update.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	bufsize = select_rcv_wnd(so);
	SOCKBUF_UNLOCK(&so->so_rcv);
	toep->rx_credits = bufsize - tp->rcv_wnd;

	tp->iss = iss;
	tcp_sendseqinit(tp);
	tp->snd_una = iss + 1;
	tp->snd_nxt = iss + 1;
	tp->snd_max = iss + 1;

	assign_rxopt(tp, tcpopt);

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf)
		bufsize = V_tcp_autosndbuf_max;
	else
		bufsize = sbspace(&so->so_snd);
	SOCKBUF_UNLOCK(&so->so_snd);

	ftxp.snd_nxt = tp->snd_nxt;
	ftxp.rcv_nxt = tp->rcv_nxt;
	ftxp.snd_space = bufsize;
	ftxp.mss = tp->t_maxseg;
	send_flowc_wr(toep, &ftxp);

	soisconnected(so);
}

static int
send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits)
{
	struct wrqe *wr;
	struct cpl_rx_data_ack *req;
	uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	KASSERT(credits >= 0, ("%s: %d credits", __func__, credits));

	wr = alloc_wrqe(sizeof(*req), toep->ctrlq);
	if (wr == NULL)
		return (0);
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid);
	req->credit_dack = htobe32(dack | V_RX_CREDITS(credits));

	t4_wrq_tx(sc, wr);
	return (credits);
}

void
t4_rcvd(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_rcv;
	struct toepcb *toep = tp->t_toe;
	int credits;

	INP_WLOCK_ASSERT(inp);

	SOCKBUF_LOCK(sb);
	KASSERT(toep->sb_cc >= sbused(sb),
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sbused(sb), toep->sb_cc));
	if (toep->ulp_mode == ULP_MODE_ISCSI) {
		toep->rx_credits += toep->sb_cc;
		toep->sb_cc = 0;
	} else {
		toep->rx_credits += toep->sb_cc - sbused(sb);
		toep->sb_cc = sbused(sb);
	}
	credits = toep->rx_credits;
	SOCKBUF_UNLOCK(sb);
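
	/*
	 * Return credits to the HW lazily: only once the accumulated total
	 * is within 16KB of filling the receive window, or is at least 15KB,
	 * is a CPL_RX_DATA_ACK worth sending.
	 */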
	if (credits > 0 &&
	    (credits + 16384 >= tp->rcv_wnd || credits >= 15 * 1024)) {

		credits = send_rx_credits(sc, toep, credits);
		SOCKBUF_LOCK(sb);
		toep->rx_credits -= credits;
		SOCKBUF_UNLOCK(sb);
		tp->rcv_wnd += credits;
		tp->rcv_adv += credits;
	}
}

/*
 * Close a connection by sending a CPL_CLOSE_CON_REQ message.
 */
static int
close_conn(struct adapter *sc, struct toepcb *toep)
{
	struct wrqe *wr;
	struct cpl_close_con_req *req;
	unsigned int tid = toep->tid;

	CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid,
	    toep->flags & TPF_FIN_SENT ? ", IGNORED" : "");

	if (toep->flags & TPF_FIN_SENT)
		return (0);

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) |
	    V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr)));
	req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) |
	    V_FW_WR_FLOWID(tid));
	req->wr.wr_lo = cpu_to_be64(0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	toep->flags |= TPF_FIN_SENT;
	toep->flags &= ~TPF_SEND_FIN;
	t4_l2t_send(sc, wr, toep->l2te);

	return (0);
}

#define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
#define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16))

/* Maximum amount of immediate data we could stuff in a WR */
static inline int
max_imm_payload(int tx_credits)
{
	const int n = 2;	/* Use only up to 2 desc for imm. data WR */

	KASSERT(tx_credits >= 0 &&
	    tx_credits <= MAX_OFLD_TX_CREDITS,
	    ("%s: %d credits", __func__, tx_credits));

	if (tx_credits < MIN_OFLD_TX_CREDITS)
		return (0);

	if (tx_credits >= (n * EQ_ESIZE) / 16)
		return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr));
	else
		return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr));
}

/* Maximum number of SGL entries we could stuff in a WR */
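/*
 * For example, with a full 512B work request (32 credits):
 * MIN_OFLD_TX_CREDITS (2) covers the WR header, leaving 30 credits = 480B of
 * ulptx_sge_pair space at 24B per pair of entries, i.e. 40 entries on top of
 * the one in the ulptx_sgl itself -- the 41 of OFLD_SGL_LEN below.
 */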
static inline int
max_dsgl_nsegs(int tx_credits)
{
	int nseg = 1;	/* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */
	int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS;

	KASSERT(tx_credits >= 0 &&
	    tx_credits <= MAX_OFLD_TX_CREDITS,
	    ("%s: %d credits", __func__, tx_credits));

	if (tx_credits < MIN_OFLD_TX_CREDITS)
		return (0);

	nseg += 2 * (sge_pair_credits * 16 / 24);
	if ((sge_pair_credits * 16) % 24 == 16)
		nseg++;

	return (nseg);
}

static inline void
write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen,
    unsigned int plen, uint8_t credits, int shove, int ulp_mode, int txalign)
{
	struct fw_ofld_tx_data_wr *txwr = dst;
	unsigned int wr_ulp_mode;

	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) |
	    V_FW_WR_IMMDLEN(immdlen));
	txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) |
	    V_FW_WR_LEN16(credits));

	/* for iscsi, the mode & submode setting is per-packet */
	if (toep->ulp_mode == ULP_MODE_ISCSI)
		wr_ulp_mode = V_FW_OFLD_TX_DATA_WR_ULPMODE(ulp_mode >> 4) |
		    V_FW_OFLD_TX_DATA_WR_ULPSUBMODE(ulp_mode & 3);
	else
		wr_ulp_mode = V_FW_OFLD_TX_DATA_WR_ULPMODE(toep->ulp_mode);

	txwr->lsodisable_to_proxy =
	    htobe32(wr_ulp_mode |
		V_FW_OFLD_TX_DATA_WR_URGENT(0) |	/* XXX */
		V_FW_OFLD_TX_DATA_WR_SHOVE(shove));
	txwr->plen = htobe32(plen);

	if (txalign > 0) {
		struct tcpcb *tp = intotcpcb(toep->inp);

		if (plen < 2 * tp->t_maxseg || is_10G_port(toep->port))
			txwr->lsodisable_to_proxy |=
			    htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE);
		else
			txwr->lsodisable_to_proxy |=
			    htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD |
				(tp->t_flags & TF_NODELAY ? 0 :
				F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE));
	}
}

/*
 * Generate a DSGL from a starting mbuf.  The total number of segments and the
 * maximum segments in any one mbuf are provided.
 */
static void
write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n)
{
	struct mbuf *m;
	struct ulptx_sgl *usgl = dst;
	int i, j, rc;
	struct sglist sg;
	struct sglist_seg segs[n];

	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

	sglist_init(&sg, n, segs);
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));

	i = -1;
	for (m = start; m != stop; m = m->m_next) {
		rc = sglist_append(&sg, mtod(m, void *), m->m_len);
		if (__predict_false(rc != 0))
			panic("%s: sglist_append %d", __func__, rc);

		for (j = 0; j < sg.sg_nseg; i++, j++) {
			if (i < 0) {
				usgl->len0 = htobe32(segs[j].ss_len);
				usgl->addr0 = htobe64(segs[j].ss_paddr);
			} else {
				usgl->sge[i / 2].len[i & 1] =
				    htobe32(segs[j].ss_len);
				usgl->sge[i / 2].addr[i & 1] =
				    htobe64(segs[j].ss_paddr);
			}
#ifdef INVARIANTS
			nsegs--;
#endif
		}
		sglist_reset(&sg);
	}
	if (i & 1)
		usgl->sge[i / 2].len[1] = htobe32(0);
	KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p",
	    __func__, nsegs, start, stop));
}

/*
 * Max number of SGL entries an offload tx work request can have.  This is 41
 * (1 + 40) for a full 512B work request.
 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40)
 */
#define OFLD_SGL_LEN (41)

/*
 * Send data and/or a FIN to the peer.
 *
 * The socket's so_snd buffer consists of a stream of data starting with sb_mb
 * and linked together with m_next.  sb_sndptr, if set, is the last mbuf that
 * was transmitted.
 *
 * drop indicates the number of bytes that should be dropped from the head of
 * the send buffer.  It is an optimization that lets do_fw4_ack avoid creating
 * contention on the send buffer lock (before this change it used to do
 * sowwakeup and then t4_push_frames right after that when recovering from tx
 * stalls).  When drop is set this function MUST drop the bytes and wake up any
 * writers.
 */
void
t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m, *sb_sndptr;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_snd;
	int tx_credits, shove, compl, space, sowwakeup;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
	    toep->ulp_mode == ULP_MODE_TCPDDP ||
	    toep->ulp_mode == ULP_MODE_RDMA,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
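	/*
	 * In practice do_fw4_ack is that someone: it clears TPF_TX_SUSPENDED
	 * and calls back here once available tx credits climb back to a
	 * quarter of the total.
	 */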
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	do {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		SOCKBUF_LOCK(sb);
		sowwakeup = drop;
		if (drop) {
			sbdrop_locked(sb, drop);
			drop = 0;
		}
		sb_sndptr = sb->sb_sndptr;
		sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb;
		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/* This mbuf sent us _over_ the nsegs limit, back out */
			if (plen > max_imm && nsegs > max_nsegs) {
				nsegs -= n;
				plen -= m->m_len;
				if (plen == 0) {
					/* Too few credits */
					toep->flags |= TPF_TX_SUSPENDED;
					if (sowwakeup)
						sowwakeup_locked(so);
					else
						SOCKBUF_UNLOCK(sb);
					SOCKBUF_UNLOCK_ASSERT(sb);
					return;
				}
				break;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
			sb_sndptr = m;	/* new sb->sb_sndptr if all goes well */

			/* This mbuf put us right at the max_nsegs limit */
			if (plen > max_imm && nsegs == max_nsegs) {
				m = m->m_next;
				break;
			}
		}

		shove = m == NULL && !(tp->t_flags & TF_MORETOCOME);
		space = sbspace(sb);

		if (space <= sb->sb_hiwat * 3 / 8 &&
		    toep->plen_nocompl + plen >= sb->sb_hiwat / 4)
			compl = 1;
		else
			compl = 0;

		if (sb->sb_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf &&
		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
		    space < sb->sb_hiwat / 8) {
			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max);

			if (!sbreserve_locked(sb, newsize, so, NULL))
				sb->sb_flags &= ~SB_AUTOSIZE;
			else
				sowwakeup = 1;	/* room available */
		}
		if (sowwakeup)
			sowwakeup_locked(so);
		else
			SOCKBUF_UNLOCK(sb);
		SOCKBUF_UNLOCK_ASSERT(sb);

		/* nothing to send */
		if (plen == 0) {
			KASSERT(m == NULL,
			    ("%s: nothing to send, but m != NULL", __func__));
			break;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		if (plen <= max_imm) {

			/* Immediate data tx */

			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
			    toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, plen, credits, shove, 0,
			    sc->tt.tx_align);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
			nsegs = 0;
		} else {
			int wr_len;

			/* DSGL tx */

			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, plen, credits, shove, 0,
			    sc->tt.tx_align);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));

		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4)
			compl = 1;

		if (compl || toep->ulp_mode == ULP_MODE_RDMA) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}

		tp->snd_nxt += plen;
		tp->snd_max += plen;

		SOCKBUF_LOCK(sb);
		KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__));
		sb->sb_sndptr = sb_sndptr;
		SOCKBUF_UNLOCK(sb);

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	} while (m != NULL);

	/* Send a FIN if requested, but only if there's no more data to send */
	if (m == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}

/* Send ULP data over TOE using TX_DATA_WR. We send whole mbuf at once */
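/*
 * Note that HW-inserted digests occupy TCP sequence space here: snd_nxt and
 * snd_max advance by ulp_len (payload + extra digest bytes), while
 * txsd->plen records only the host payload, which is what is dropped from
 * the send queue on completion.
 */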
void
t4_ulp_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m = NULL;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	unsigned int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	int tx_credits, ulp_len = 0, ulp_mode = 0, qlen = 0;
	int shove, compl = 0;
	struct ofld_tx_sdesc *txsd;

	INP_WLOCK_ASSERT(inp);
	if (toep->flags & TPF_ABORT_SHUTDOWN)
		return;

	tp = intotcpcb(inp);
	so = inp->inp_socket;
	sb = &so->so_snd;
	txsd = &toep->txsd[toep->txsd_pidx];

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED))
		return;

	sndptr = t4_queue_iscsi_callback(so, toep, 1, &qlen);
	if (!qlen)
		return;

	do {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		if (drop) {
			t4_cpl_iscsi_callback(toep->td, toep, &drop,
			    CPL_FW4_ACK);
			drop = 0;
		}

		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/* This mbuf sent us _over_ the nsegs limit, return */
			if (plen > max_imm && nsegs > max_nsegs) {
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;

			/* This mbuf put us right at the max_nsegs limit */
			if (plen > max_imm && nsegs == max_nsegs) {
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
		}

		shove = m == NULL && !(tp->t_flags & TF_MORETOCOME);
		/* nothing to send */
		if (plen == 0) {
			KASSERT(m == NULL,
			    ("%s: nothing to send, but m != NULL", __func__));
			break;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		ulp_len = plen + ulp_extra_len(sndptr, &ulp_mode);
		if (plen <= max_imm) {

			/* Immediate data tx */
			wr = alloc_wrqe(roundup(sizeof(*txwr) + plen, 16),
			    toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, ulp_len, credits, shove,
			    ulp_mode, 0);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
		} else {
			int wr_len;

			/* DSGL tx */
			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, ulp_len, credits, shove,
			    ulp_mode, 0);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));

		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4)
			compl = 1;

		if (compl) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}
		tp->snd_nxt += ulp_len;
		tp->snd_max += ulp_len;

		/* goto next mbuf */
		sndptr = m = t4_queue_iscsi_callback(so, toep, 2, &qlen);

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS) {
			toep->flags |= TPF_TX_SUSPENDED;
		}

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	} while (m != NULL);

	/* Send a FIN if requested, but only if there's no more data to send */
	if (m == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}

int
t4_tod_output(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	t4_push_frames(sc, toep, 0);

	return (0);
}

int
t4_send_fin(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	toep->flags |= TPF_SEND_FIN;
	if (tp->t_state >= TCPS_ESTABLISHED) {
		if (toep->ulp_mode == ULP_MODE_ISCSI)
			t4_ulp_push_frames(sc, toep, 0);
		else
			t4_push_frames(sc, toep, 0);
	}

	return (0);
}

int
t4_send_rst(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#if defined(INVARIANTS)
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	/* hmmmm */
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc for tid %u [%s] not sent already",
	    __func__, toep->tid, tcpstates[tp->t_state]));

	send_reset(sc, toep, 0);
	return (0);
}

/*
 * Peer has sent us a FIN.
 */
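/*
 * The FIN moves the connection forward exactly as in the software TCP state
 * machine: SYN_RECEIVED/ESTABLISHED -> CLOSE_WAIT, FIN_WAIT_1 -> CLOSING,
 * and FIN_WAIT_2 -> TIME_WAIT (via tcp_twstart).
 */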
static int
do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_peer_close *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so;
	struct sockbuf *sb;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_PEER_CLOSE,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (__predict_false(toep->flags & TPF_SYNQE)) {
#ifdef INVARIANTS
		struct synq_entry *synqe = (void *)toep;

		INP_WLOCK(synqe->lctx->inp);
		if (synqe->flags & TPF_SYNQE_HAS_L2TE) {
			KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN,
			    ("%s: listen socket closed but tid %u not aborted.",
			    __func__, tid));
		} else {
			/*
			 * do_pass_accept_req is still running and will
			 * eventually take care of this tid.
			 */
		}
		INP_WUNLOCK(synqe->lctx->inp);
#endif
		CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
		    toep, toep->flags);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_INFO_WLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__,
	    tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	tp->rcv_nxt++;	/* FIN */

	so = inp->inp_socket;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	if (__predict_false(toep->ddp_flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) {
		m = get_ddp_mbuf(be32toh(cpl->rcv_nxt) - tp->rcv_nxt);
		tp->rcv_nxt = be32toh(cpl->rcv_nxt);
		toep->ddp_flags &= ~(DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE);

		KASSERT(toep->sb_cc >= sbused(sb),
		    ("%s: sb %p has more data (%d) than last time (%d).",
		    __func__, sb, sbused(sb), toep->sb_cc));
		toep->rx_credits += toep->sb_cc - sbused(sb);
#ifdef USE_DDP_RX_FLOW_CONTROL
		toep->rx_credits -= m->m_len;	/* adjust for F_RX_FC_DDP */
#endif
		sbappendstream_locked(sb, m, 0);
		toep->sb_cc = sbused(sb);
	}
	socantrcvmore_locked(so);	/* unlocks the sockbuf */

	if (toep->ulp_mode != ULP_MODE_RDMA) {
		KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt),
		    ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt,
		    be32toh(cpl->rcv_nxt)));
	}

	switch (tp->t_state) {
	case TCPS_SYN_RECEIVED:
		tp->t_starttime = ticks;
		/* FALLTHROUGH */

	case TCPS_ESTABLISHED:
		tp->t_state = TCPS_CLOSE_WAIT;
		break;

	case TCPS_FIN_WAIT_1:
		tp->t_state = TCPS_CLOSING;
		break;

	case TCPS_FIN_WAIT_2:
		tcp_twstart(tp);
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the inp */
		INP_INFO_WUNLOCK(&V_tcbinfo);

		INP_WLOCK(inp);
		final_cpl_received(toep);
		return (0);

	default:
		log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n",
		    __func__, tid, tp->t_state);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_tcbinfo);
	return (0);
}

/*
 * Peer has ACK'd our FIN.
 */
static int
do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_CLOSE_CON_RPL,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_INFO_WLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	so = inp->inp_socket;
	tp->snd_una = be32toh(cpl->snd_nxt) - 1;	/* exclude FIN */

	switch (tp->t_state) {
	case TCPS_CLOSING:	/* see TCPS_FIN_WAIT_2 in do_peer_close too */
		tcp_twstart(tp);
release:
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the inp */
		INP_INFO_WUNLOCK(&V_tcbinfo);

		INP_WLOCK(inp);
		final_cpl_received(toep);	/* no more CPLs expected */

		return (0);
	case TCPS_LAST_ACK:
		if (tcp_close(tp))
			INP_WUNLOCK(inp);
		goto release;

	case TCPS_FIN_WAIT_1:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			soisdisconnected(so);
		tp->t_state = TCPS_FIN_WAIT_2;
		break;

	default:
		log(LOG_ERR,
		    "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n",
		    __func__, tid, tcpstates[tp->t_state]);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_tcbinfo);
	return (0);
}

void
send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid,
    int rst_status)
{
	struct wrqe *wr;
	struct cpl_abort_rpl *cpl;

	wr = alloc_wrqe(sizeof(*cpl), ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	cpl = wrtod(wr);

	INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid);
	cpl->cmd = rst_status;

	t4_wrq_tx(sc, wr);
}

static int
abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN:
	case CPL_ERR_CONN_RESET:
		return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET);
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return (ETIMEDOUT);
	default:
		return (EIO);
	}
}

int
cpl_not_handled(struct sge_iq *, const struct rss_header *, struct mbuf *);

/*
 * tom_cpl_iscsi_callback -
 * iscsi and tom share the following cpl messages, so when any of these
 * messages is received, after tom is done processing it, the message needs
 * to be forwarded to iscsi for further processing:
 * - CPL_SET_TCB_RPL
 * - CPL_RX_DATA_DDP
 */
void (*tom_cpl_iscsi_callback)(struct tom_data *, struct socket *, void *,
    unsigned int);

struct mbuf *(*tom_queue_iscsi_callback)(struct socket *, unsigned int, int *);

/*
 * Check if the handler function is set for a given CPL; return 0 if the
 * function is NULL or cpl_not_handled, 1 otherwise.
 */
int
t4tom_cpl_handler_registered(struct adapter *sc, unsigned int opcode)
{

	MPASS(opcode < nitems(sc->cpl_handler));

	return (sc->cpl_handler[opcode] &&
	    sc->cpl_handler[opcode] != cpl_not_handled);
}

/*
 * Set the tom_cpl_iscsi_callback function.  This function should be used
 * whenever both toe and iscsi need to process the same cpl msg.
 */
void
t4tom_register_cpl_iscsi_callback(void (*fp)(struct tom_data *, struct socket *,
    void *, unsigned int))
{

	tom_cpl_iscsi_callback = fp;
}

void
t4tom_register_queue_iscsi_callback(struct mbuf *(*fp)(struct socket *,
    unsigned int, int *qlen))
{

	tom_queue_iscsi_callback = fp;
}

int
t4_cpl_iscsi_callback(struct tom_data *td, struct toepcb *toep, void *m,
    unsigned int opcode)
{
	struct socket *so;

	if (opcode == CPL_FW4_ACK)
		so = toep->inp->inp_socket;
	else {
		INP_WLOCK(toep->inp);
		so = toep->inp->inp_socket;
		INP_WUNLOCK(toep->inp);
	}

	if (tom_cpl_iscsi_callback && so) {
		if (toep->ulp_mode == ULP_MODE_ISCSI) {
			tom_cpl_iscsi_callback(td, so, m, opcode);
			return (0);
		}
	}

	return (1);
}

struct mbuf *
t4_queue_iscsi_callback(struct socket *so, struct toepcb *toep,
    unsigned int cmd, int *qlen)
{

	if (tom_queue_iscsi_callback && so) {
		if (toep->ulp_mode == ULP_MODE_ISCSI)
			return (tom_queue_iscsi_callback(so, cmd, qlen));
	}

	return (NULL);
}

/*
 * TCP RST from the peer, timeout, or some other such critical error.
 */
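/*
 * Negative advice (the HW flagging retransmission or persist-timer trouble)
 * is advisory rather than fatal; do_abort_req ignores it and the tid stays
 * live.
 */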
static int
do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct sge_wrq *ofld_txq = toep->ofld_txq;
	struct inpcb *inp;
	struct tcpcb *tp;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_REQ_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (toep->flags & TPF_SYNQE)
		return (do_abort_req_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	if (negative_advice(cpl->status)) {
		CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)",
		    __func__, cpl->status, tid, toep->flags);
		return (0);	/* Ignore negative advice */
	}

	inp = toep->inp;
	INP_INFO_WLOCK(&V_tcbinfo);	/* for tcp_close */
	INP_WLOCK(inp);

	tp = intotcpcb(inp);

	CTR6(KTR_CXGBE,
	    "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags,
	    inp->inp_flags, cpl->status);

	/*
	 * If we'd initiated an abort earlier, the reply to it is responsible
	 * for cleaning up resources.  Otherwise we tear everything down right
	 * here right now.  We owe the T4 a CPL_ABORT_RPL no matter what.
	 */
	if (toep->flags & TPF_ABORT_SHUTDOWN) {
		INP_WUNLOCK(inp);
		goto done;
	}
	toep->flags |= TPF_ABORT_SHUTDOWN;

	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)
			so_error_set(so, abort_status_to_errno(tp,
			    cpl->status));
		tp = tcp_close(tp);
		if (tp == NULL)
			INP_WLOCK(inp);	/* re-acquire */
	}

	final_cpl_received(toep);
done:
	INP_INFO_WUNLOCK(&V_tcbinfo);
	send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST);
	return (0);
}

/*
 * Reply to the CPL_ABORT_REQ (send_reset)
 */
static int
do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_RPL_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (toep->flags & TPF_SYNQE)
		return (do_abort_rpl_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d",
	    __func__, tid, toep, inp, cpl->status);

	KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
	    ("%s: wasn't expecting abort reply", __func__));

	INP_WLOCK(inp);
	final_cpl_received(toep);

	return (0);
}
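
/*
 * Unlike most CPL handlers in this file, CPL_RX_DATA arrives with payload in
 * the mbuf; the CPL header is stripped off (m_adj) and the data is appended
 * to the receive socket buffer.
 */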
static int
do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_data *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	int len;
	uint32_t ddp_placed = 0;

	if (__predict_false(toep->flags & TPF_SYNQE)) {
#ifdef INVARIANTS
		struct synq_entry *synqe = (void *)toep;

		INP_WLOCK(synqe->lctx->inp);
		if (synqe->flags & TPF_SYNQE_HAS_L2TE) {
			KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN,
			    ("%s: listen socket closed but tid %u not aborted.",
			    __func__, tid));
		} else {
			/*
			 * do_pass_accept_req is still running and will
			 * eventually take care of this tid.
			 */
		}
		INP_WUNLOCK(synqe->lctx->inp);
#endif
		CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
		    toep, toep->flags);
		m_freem(m);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
	len = m->m_pkthdr.len;

	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	tp = intotcpcb(inp);

	if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq)))
		ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt;

	tp->rcv_nxt += len;
	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
	tp->rcv_wnd -= len;
	tp->t_rcvtime = ticks;

	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
		CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
		    __func__, tid, len);
		m_freem(m);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);

		INP_INFO_WLOCK(&V_tcbinfo);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp)
			INP_WUNLOCK(inp);
		INP_INFO_WUNLOCK(&V_tcbinfo);

		return (0);
	}

	/* receive buffer autosize */
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    len > (sbspace(sb) / 8 * 7)) {
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(sb, newsize, so, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
		else
			toep->rx_credits += newsize - hiwat;
	}
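
	/*
	 * DDP state handling: detect the HW falling out of DDP mode
	 * (ddp_off), re-arm DDP_OK after a cooling-off period, and either
	 * note an indicate or switch DDP on when conditions look favorable.
	 */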
	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		int changed = !(toep->ddp_flags & DDP_ON) ^ cpl->ddp_off;

		if (changed) {
			if (toep->ddp_flags & DDP_SC_REQ)
				toep->ddp_flags ^= DDP_ON | DDP_SC_REQ;
			else {
				KASSERT(cpl->ddp_off == 1,
				    ("%s: DDP switched on by itself.",
				    __func__));

				/* Fell out of DDP mode */
				toep->ddp_flags &= ~(DDP_ON | DDP_BUF0_ACTIVE |
				    DDP_BUF1_ACTIVE);

				if (ddp_placed)
					insert_ddp_data(toep, ddp_placed);
			}
		}

		if ((toep->ddp_flags & DDP_OK) == 0 &&
		    time_uptime >= toep->ddp_disabled + DDP_RETRY_WAIT) {
			toep->ddp_score = DDP_LOW_SCORE;
			toep->ddp_flags |= DDP_OK;
			CTR3(KTR_CXGBE, "%s: tid %u DDP_OK @ %u",
			    __func__, tid, time_uptime);
		}

		if (toep->ddp_flags & DDP_ON) {

			/*
			 * CPL_RX_DATA with DDP on can only be an indicate.  Ask
			 * soreceive to post a buffer or disable DDP.  The
			 * payload that arrived in this indicate is appended to
			 * the socket buffer as usual.
			 */

#if 0
			CTR5(KTR_CXGBE,
			    "%s: tid %u (0x%x) DDP indicate (seq 0x%x, len %d)",
			    __func__, tid, toep->flags, be32toh(cpl->seq), len);
#endif
			sb->sb_flags |= SB_DDP_INDICATE;
		} else if ((toep->ddp_flags & (DDP_OK|DDP_SC_REQ)) == DDP_OK &&
		    tp->rcv_wnd > DDP_RSVD_WIN && len >= sc->tt.ddp_thres) {

			/*
			 * DDP allowed but isn't on (and a request to switch it
			 * on isn't pending either), and conditions are ripe for
			 * it to work.  Switch it on.
			 */

			enable_ddp(sc, toep);
		}
	}

	KASSERT(toep->sb_cc >= sbused(sb),
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sbused(sb), toep->sb_cc));
	toep->rx_credits += toep->sb_cc - sbused(sb);
	sbappendstream_locked(sb, m, 0);
	toep->sb_cc = sbused(sb);
	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);

	INP_WUNLOCK(inp);
	return (0);
}

#define S_CPL_FW4_ACK_OPCODE 24
#define M_CPL_FW4_ACK_OPCODE 0xff
#define V_CPL_FW4_ACK_OPCODE(x) ((x) << S_CPL_FW4_ACK_OPCODE)
#define G_CPL_FW4_ACK_OPCODE(x) \
    (((x) >> S_CPL_FW4_ACK_OPCODE) & M_CPL_FW4_ACK_OPCODE)

#define S_CPL_FW4_ACK_FLOWID 0
#define M_CPL_FW4_ACK_FLOWID 0xffffff
#define V_CPL_FW4_ACK_FLOWID(x) ((x) << S_CPL_FW4_ACK_FLOWID)
#define G_CPL_FW4_ACK_FLOWID(x) \
    (((x) >> S_CPL_FW4_ACK_FLOWID) & M_CPL_FW4_ACK_FLOWID)

#define S_CPL_FW4_ACK_CR 24
#define M_CPL_FW4_ACK_CR 0xff
#define V_CPL_FW4_ACK_CR(x) ((x) << S_CPL_FW4_ACK_CR)
#define G_CPL_FW4_ACK_CR(x) (((x) >> S_CPL_FW4_ACK_CR) & M_CPL_FW4_ACK_CR)

#define S_CPL_FW4_ACK_SEQVAL 0
#define M_CPL_FW4_ACK_SEQVAL 0x1
#define V_CPL_FW4_ACK_SEQVAL(x) ((x) << S_CPL_FW4_ACK_SEQVAL)
#define G_CPL_FW4_ACK_SEQVAL(x) \
    (((x) >> S_CPL_FW4_ACK_SEQVAL) & M_CPL_FW4_ACK_SEQVAL)
#define F_CPL_FW4_ACK_SEQVAL V_CPL_FW4_ACK_SEQVAL(1U)

static int
do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp;
	struct tcpcb *tp;
	struct socket *so;
	uint8_t credits = cpl->credits;
	struct ofld_tx_sdesc *txsd;
	int plen;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	/*
	 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and
	 * now this comes back carrying the credits for the flowc.
	 */
	if (__predict_false(toep->flags & TPF_SYNQE)) {
		KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
		    ("%s: credits for a synq entry %p", __func__, toep));
		return (0);
	}

	inp = toep->inp;

	KASSERT(opcode == CPL_FW4_ACK,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_WLOCK(inp);

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) {
		INP_WUNLOCK(inp);
		return (0);
	}

	KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0,
	    ("%s: inp_flags 0x%x", __func__, inp->inp_flags));

	tp = intotcpcb(inp);

	if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) {
		tcp_seq snd_una = be32toh(cpl->snd_una);

#ifdef INVARIANTS
		if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) {
			log(LOG_ERR,
			    "%s: unexpected seq# %x for TID %u, snd_una %x\n",
			    __func__, snd_una, toep->tid, tp->snd_una);
		}
#endif

		if (tp->snd_una != snd_una) {
			tp->snd_una = snd_una;
			tp->ts_recent_age = tcp_ts_getticks();
		}
	}

	so = inp->inp_socket;
	txsd = &toep->txsd[toep->txsd_cidx];
	plen = 0;
	while (credits) {
		KASSERT(credits >= txsd->tx_credits,
		    ("%s: too many (or partial) credits", __func__));
		credits -= txsd->tx_credits;
		toep->tx_credits += txsd->tx_credits;
		plen += txsd->plen;
		txsd++;
		toep->txsd_avail++;
		KASSERT(toep->txsd_avail <= toep->txsd_total,
		    ("%s: txsd avail > total", __func__));
		if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) {
			txsd = &toep->txsd[0];
			toep->txsd_cidx = 0;
		}
	}

	if (toep->tx_credits == toep->tx_total) {
		toep->tx_nocompl = 0;
		toep->plen_nocompl = 0;
	}

	if (toep->flags & TPF_TX_SUSPENDED &&
	    toep->tx_credits >= toep->tx_total / 4) {
		toep->flags &= ~TPF_TX_SUSPENDED;
		if (toep->ulp_mode == ULP_MODE_ISCSI)
			t4_ulp_push_frames(sc, toep, plen);
		else
			t4_push_frames(sc, toep, plen);
	} else if (plen > 0) {
		struct sockbuf *sb = &so->so_snd;

		if (toep->ulp_mode == ULP_MODE_ISCSI)
			t4_cpl_iscsi_callback(toep->td, toep, &plen,
			    CPL_FW4_ACK);
		else {
			SOCKBUF_LOCK(sb);
			sbdrop_locked(sb, plen);
			sowwakeup_locked(so);
			SOCKBUF_UNLOCK_ASSERT(sb);
		}
	}

	INP_WUNLOCK(inp);

	return (0);
}

static int
do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_SET_TCB_RPL,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (is_ftid(sc, tid))
		return (t4_filter_rpl(iq, rss, m));	/* TCB is a filter */
	else {
		struct toepcb *toep = lookup_tid(sc, tid);

		t4_cpl_iscsi_callback(toep->td, toep, m, CPL_SET_TCB_RPL);
		return (0);
	}

	CXGBE_UNIMPLEMENTED(__func__);
}
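
/*
 * Modify one field of the connection's TCB: "word" selects the TCB word and
 * only the bits set in "mask" are changed, to the corresponding bits of
 * "val".  NO_REPLY is set here, so no CPL_SET_TCB_RPL is expected for
 * updates sent through this function.
 */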
void
t4_set_tcb_field(struct adapter *sc, struct toepcb *toep, int ctrl,
    uint16_t word, uint64_t mask, uint64_t val)
{
	struct wrqe *wr;
	struct cpl_set_tcb_field *req;

	wr = alloc_wrqe(sizeof(*req), ctrl ? toep->ctrlq : toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	t4_wrq_tx(sc, wr);
}

void
t4_init_cpl_io_handlers(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_PEER_CLOSE, do_peer_close);
	t4_register_cpl_handler(sc, CPL_CLOSE_CON_RPL, do_close_con_rpl);
	t4_register_cpl_handler(sc, CPL_ABORT_REQ_RSS, do_abort_req);
	t4_register_cpl_handler(sc, CPL_ABORT_RPL_RSS, do_abort_rpl);
	t4_register_cpl_handler(sc, CPL_RX_DATA, do_rx_data);
	t4_register_cpl_handler(sc, CPL_FW4_ACK, do_fw4_ack);
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, do_set_tcb_rpl);
}

void
t4_uninit_cpl_io_handlers(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
}
#endif