1 /*- 2 * Copyright (c) 2012 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_inet.h" 32 33 #ifdef TCP_OFFLOAD 34 #include <sys/param.h> 35 #include <sys/types.h> 36 #include <sys/kernel.h> 37 #include <sys/ktr.h> 38 #include <sys/module.h> 39 #include <sys/protosw.h> 40 #include <sys/domain.h> 41 #include <sys/socket.h> 42 #include <sys/socketvar.h> 43 #include <sys/sglist.h> 44 #include <netinet/in.h> 45 #include <netinet/in_pcb.h> 46 #include <netinet/ip.h> 47 #include <netinet/tcp_var.h> 48 #define TCPSTATES 49 #include <netinet/tcp_fsm.h> 50 #include <netinet/tcp_seq.h> 51 #include <netinet/toecore.h> 52 53 #include "common/common.h" 54 #include "common/t4_msg.h" 55 #include "common/t4_regs.h" 56 #include "common/t4_tcb.h" 57 #include "tom/t4_tom_l2t.h" 58 #include "tom/t4_tom.h" 59 60 VNET_DECLARE(int, tcp_do_autosndbuf); 61 #define V_tcp_do_autosndbuf VNET(tcp_do_autosndbuf) 62 VNET_DECLARE(int, tcp_autosndbuf_inc); 63 #define V_tcp_autosndbuf_inc VNET(tcp_autosndbuf_inc) 64 VNET_DECLARE(int, tcp_autosndbuf_max); 65 #define V_tcp_autosndbuf_max VNET(tcp_autosndbuf_max) 66 VNET_DECLARE(int, tcp_do_autorcvbuf); 67 #define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf) 68 VNET_DECLARE(int, tcp_autorcvbuf_inc); 69 #define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc) 70 VNET_DECLARE(int, tcp_autorcvbuf_max); 71 #define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max) 72 73 /* 74 * For ULP connections HW may add headers, e.g., for digests, that aren't part 75 * of the messages sent by the host but that are part of the TCP payload and 76 * therefore consume TCP sequence space. Tx connection parameters that 77 * operate in TCP sequence space are affected by the HW additions and need to 78 * compensate for them to accurately track TCP sequence numbers. This array 79 * contains the compensating extra lengths for ULP packets. It is indexed by 80 * a packet's ULP submode. 81 */ 82 const unsigned int t4_ulp_extra_len[] = {0, 4, 4, 8}; 83 84 /* 85 * Return the length of any HW additions that will be made to a Tx packet. 86 * Such additions can happen for some types of ULP packets. 87 */ 88 static inline unsigned int 89 ulp_extra_len(struct mbuf *m, int *ulp_mode) 90 { 91 struct m_tag *mtag; 92 93 if ((mtag = m_tag_find(m, CXGBE_ISCSI_MBUF_TAG, NULL)) == NULL) 94 return (0); 95 *ulp_mode = *((int *)(mtag + 1)); 96 97 return (t4_ulp_extra_len[*ulp_mode & 3]); 98 } 99 100 void 101 send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp) 102 { 103 struct wrqe *wr; 104 struct fw_flowc_wr *flowc; 105 unsigned int nparams = ftxp ? 8 : 6, flowclen; 106 struct port_info *pi = toep->port; 107 struct adapter *sc = pi->adapter; 108 unsigned int pfvf = G_FW_VIID_PFN(pi->viid) << S_FW_VIID_PFN; 109 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 110 111 KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT), 112 ("%s: flowc for tid %u sent already", __func__, toep->tid)); 113 114 CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid); 115 116 flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval); 117 118 wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq); 119 if (wr == NULL) { 120 /* XXX */ 121 panic("%s: allocation failure.", __func__); 122 } 123 flowc = wrtod(wr); 124 memset(flowc, 0, wr->wr_len); 125 126 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 127 V_FW_FLOWC_WR_NPARAMS(nparams)); 128 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) | 129 V_FW_WR_FLOWID(toep->tid)); 130 131 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 132 flowc->mnemval[0].val = htobe32(pfvf); 133 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 134 flowc->mnemval[1].val = htobe32(pi->tx_chan); 135 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 136 flowc->mnemval[2].val = htobe32(pi->tx_chan); 137 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 138 flowc->mnemval[3].val = htobe32(toep->ofld_rxq->iq.abs_id); 139 if (ftxp) { 140 uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf); 141 142 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; 143 flowc->mnemval[4].val = htobe32(ftxp->snd_nxt); 144 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 145 flowc->mnemval[5].val = htobe32(ftxp->rcv_nxt); 146 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 147 flowc->mnemval[6].val = htobe32(sndbuf); 148 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 149 flowc->mnemval[7].val = htobe32(ftxp->mss); 150 } else { 151 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF; 152 flowc->mnemval[4].val = htobe32(512); 153 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS; 154 flowc->mnemval[5].val = htobe32(512); 155 } 156 157 txsd->tx_credits = howmany(flowclen, 16); 158 txsd->plen = 0; 159 KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0, 160 ("%s: not enough credits (%d)", __func__, toep->tx_credits)); 161 toep->tx_credits -= txsd->tx_credits; 162 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) 163 toep->txsd_pidx = 0; 164 toep->txsd_avail--; 165 166 toep->flags |= TPF_FLOWC_WR_SENT; 167 t4_wrq_tx(sc, wr); 168 } 169 170 void 171 send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt) 172 { 173 struct wrqe *wr; 174 struct cpl_abort_req *req; 175 int tid = toep->tid; 176 struct inpcb *inp = toep->inp; 177 struct tcpcb *tp = intotcpcb(inp); /* don't use if INP_DROPPED */ 178 179 INP_WLOCK_ASSERT(inp); 180 181 CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s", 182 __func__, toep->tid, 183 inp->inp_flags & INP_DROPPED ? "inp dropped" : 184 tcpstates[tp->t_state], 185 toep->flags, inp->inp_flags, 186 toep->flags & TPF_ABORT_SHUTDOWN ? 187 " (abort already in progress)" : ""); 188 189 if (toep->flags & TPF_ABORT_SHUTDOWN) 190 return; /* abort already in progress */ 191 192 toep->flags |= TPF_ABORT_SHUTDOWN; 193 194 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 195 ("%s: flowc_wr not sent for tid %d.", __func__, tid)); 196 197 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 198 if (wr == NULL) { 199 /* XXX */ 200 panic("%s: allocation failure.", __func__); 201 } 202 req = wrtod(wr); 203 204 INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid); 205 if (inp->inp_flags & INP_DROPPED) 206 req->rsvd0 = htobe32(snd_nxt); 207 else 208 req->rsvd0 = htobe32(tp->snd_nxt); 209 req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT); 210 req->cmd = CPL_ABORT_SEND_RST; 211 212 /* 213 * XXX: What's the correct way to tell that the inp hasn't been detached 214 * from its socket? Should I even be flushing the snd buffer here? 215 */ 216 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 217 struct socket *so = inp->inp_socket; 218 219 if (so != NULL) /* because I'm not sure. See comment above */ 220 sbflush(&so->so_snd); 221 } 222 223 t4_l2t_send(sc, wr, toep->l2te); 224 } 225 226 /* 227 * Called when a connection is established to translate the TCP options 228 * reported by HW to FreeBSD's native format. 229 */ 230 static void 231 assign_rxopt(struct tcpcb *tp, unsigned int opt) 232 { 233 struct toepcb *toep = tp->t_toe; 234 struct adapter *sc = td_adapter(toep->td); 235 236 INP_LOCK_ASSERT(tp->t_inpcb); 237 238 tp->t_maxseg = tp->t_maxopd = sc->params.mtus[G_TCPOPT_MSS(opt)] - 40; 239 240 if (G_TCPOPT_TSTAMP(opt)) { 241 tp->t_flags |= TF_RCVD_TSTMP; /* timestamps ok */ 242 tp->ts_recent = 0; /* hmmm */ 243 tp->ts_recent_age = tcp_ts_getticks(); 244 tp->t_maxseg -= TCPOLEN_TSTAMP_APPA; 245 } 246 247 if (G_TCPOPT_SACK(opt)) 248 tp->t_flags |= TF_SACK_PERMIT; /* should already be set */ 249 else 250 tp->t_flags &= ~TF_SACK_PERMIT; /* sack disallowed by peer */ 251 252 if (G_TCPOPT_WSCALE_OK(opt)) 253 tp->t_flags |= TF_RCVD_SCALE; 254 255 /* Doing window scaling? */ 256 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 257 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 258 tp->rcv_scale = tp->request_r_scale; 259 tp->snd_scale = G_TCPOPT_SND_WSCALE(opt); 260 } 261 } 262 263 /* 264 * Completes some final bits of initialization for just established connections 265 * and changes their state to TCPS_ESTABLISHED. 266 * 267 * The ISNs are from after the exchange of SYNs. i.e., the true ISN + 1. 268 */ 269 void 270 make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn, 271 uint16_t opt) 272 { 273 struct inpcb *inp = toep->inp; 274 struct socket *so = inp->inp_socket; 275 struct tcpcb *tp = intotcpcb(inp); 276 long bufsize; 277 uint32_t iss = be32toh(snd_isn) - 1; /* true ISS */ 278 uint32_t irs = be32toh(rcv_isn) - 1; /* true IRS */ 279 uint16_t tcpopt = be16toh(opt); 280 struct flowc_tx_params ftxp; 281 282 INP_WLOCK_ASSERT(inp); 283 KASSERT(tp->t_state == TCPS_SYN_SENT || 284 tp->t_state == TCPS_SYN_RECEIVED, 285 ("%s: TCP state %s", __func__, tcpstates[tp->t_state])); 286 287 CTR4(KTR_CXGBE, "%s: tid %d, toep %p, inp %p", 288 __func__, toep->tid, toep, inp); 289 290 tp->t_state = TCPS_ESTABLISHED; 291 tp->t_starttime = ticks; 292 TCPSTAT_INC(tcps_connects); 293 294 tp->irs = irs; 295 tcp_rcvseqinit(tp); 296 tp->rcv_wnd = toep->rx_credits << 10; 297 tp->rcv_adv += tp->rcv_wnd; 298 tp->last_ack_sent = tp->rcv_nxt; 299 300 /* 301 * If we were unable to send all rx credits via opt0, save the remainder 302 * in rx_credits so that they can be handed over with the next credit 303 * update. 304 */ 305 SOCKBUF_LOCK(&so->so_rcv); 306 bufsize = select_rcv_wnd(so); 307 SOCKBUF_UNLOCK(&so->so_rcv); 308 toep->rx_credits = bufsize - tp->rcv_wnd; 309 310 tp->iss = iss; 311 tcp_sendseqinit(tp); 312 tp->snd_una = iss + 1; 313 tp->snd_nxt = iss + 1; 314 tp->snd_max = iss + 1; 315 316 assign_rxopt(tp, tcpopt); 317 318 SOCKBUF_LOCK(&so->so_snd); 319 if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf) 320 bufsize = V_tcp_autosndbuf_max; 321 else 322 bufsize = sbspace(&so->so_snd); 323 SOCKBUF_UNLOCK(&so->so_snd); 324 325 ftxp.snd_nxt = tp->snd_nxt; 326 ftxp.rcv_nxt = tp->rcv_nxt; 327 ftxp.snd_space = bufsize; 328 ftxp.mss = tp->t_maxseg; 329 send_flowc_wr(toep, &ftxp); 330 331 soisconnected(so); 332 } 333 334 static int 335 send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits) 336 { 337 struct wrqe *wr; 338 struct cpl_rx_data_ack *req; 339 uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1); 340 341 KASSERT(credits >= 0, ("%s: %d credits", __func__, credits)); 342 343 wr = alloc_wrqe(sizeof(*req), toep->ctrlq); 344 if (wr == NULL) 345 return (0); 346 req = wrtod(wr); 347 348 INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid); 349 req->credit_dack = htobe32(dack | V_RX_CREDITS(credits)); 350 351 t4_wrq_tx(sc, wr); 352 return (credits); 353 } 354 355 void 356 t4_rcvd(struct toedev *tod, struct tcpcb *tp) 357 { 358 struct adapter *sc = tod->tod_softc; 359 struct inpcb *inp = tp->t_inpcb; 360 struct socket *so = inp->inp_socket; 361 struct sockbuf *sb = &so->so_rcv; 362 struct toepcb *toep = tp->t_toe; 363 int credits; 364 365 INP_WLOCK_ASSERT(inp); 366 367 SOCKBUF_LOCK(sb); 368 KASSERT(toep->sb_cc >= sbused(sb), 369 ("%s: sb %p has more data (%d) than last time (%d).", 370 __func__, sb, sbused(sb), toep->sb_cc)); 371 if (toep->ulp_mode == ULP_MODE_ISCSI) { 372 toep->rx_credits += toep->sb_cc; 373 toep->sb_cc = 0; 374 } else { 375 toep->rx_credits += toep->sb_cc - sbused(sb); 376 toep->sb_cc = sbused(sb); 377 } 378 credits = toep->rx_credits; 379 SOCKBUF_UNLOCK(sb); 380 381 if (credits > 0 && 382 (credits + 16384 >= tp->rcv_wnd || credits >= 15 * 1024)) { 383 384 credits = send_rx_credits(sc, toep, credits); 385 SOCKBUF_LOCK(sb); 386 toep->rx_credits -= credits; 387 SOCKBUF_UNLOCK(sb); 388 tp->rcv_wnd += credits; 389 tp->rcv_adv += credits; 390 } 391 } 392 393 /* 394 * Close a connection by sending a CPL_CLOSE_CON_REQ message. 395 */ 396 static int 397 close_conn(struct adapter *sc, struct toepcb *toep) 398 { 399 struct wrqe *wr; 400 struct cpl_close_con_req *req; 401 unsigned int tid = toep->tid; 402 403 CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid, 404 toep->flags & TPF_FIN_SENT ? ", IGNORED" : ""); 405 406 if (toep->flags & TPF_FIN_SENT) 407 return (0); 408 409 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 410 ("%s: flowc_wr not sent for tid %u.", __func__, tid)); 411 412 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 413 if (wr == NULL) { 414 /* XXX */ 415 panic("%s: allocation failure.", __func__); 416 } 417 req = wrtod(wr); 418 419 req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) | 420 V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr))); 421 req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) | 422 V_FW_WR_FLOWID(tid)); 423 req->wr.wr_lo = cpu_to_be64(0); 424 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); 425 req->rsvd = 0; 426 427 toep->flags |= TPF_FIN_SENT; 428 toep->flags &= ~TPF_SEND_FIN; 429 t4_l2t_send(sc, wr, toep->l2te); 430 431 return (0); 432 } 433 434 #define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16) 435 #define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16)) 436 437 /* Maximum amount of immediate data we could stuff in a WR */ 438 static inline int 439 max_imm_payload(int tx_credits) 440 { 441 const int n = 2; /* Use only up to 2 desc for imm. data WR */ 442 443 KASSERT(tx_credits >= 0 && 444 tx_credits <= MAX_OFLD_TX_CREDITS, 445 ("%s: %d credits", __func__, tx_credits)); 446 447 if (tx_credits < MIN_OFLD_TX_CREDITS) 448 return (0); 449 450 if (tx_credits >= (n * EQ_ESIZE) / 16) 451 return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr)); 452 else 453 return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr)); 454 } 455 456 /* Maximum number of SGL entries we could stuff in a WR */ 457 static inline int 458 max_dsgl_nsegs(int tx_credits) 459 { 460 int nseg = 1; /* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */ 461 int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS; 462 463 KASSERT(tx_credits >= 0 && 464 tx_credits <= MAX_OFLD_TX_CREDITS, 465 ("%s: %d credits", __func__, tx_credits)); 466 467 if (tx_credits < MIN_OFLD_TX_CREDITS) 468 return (0); 469 470 nseg += 2 * (sge_pair_credits * 16 / 24); 471 if ((sge_pair_credits * 16) % 24 == 16) 472 nseg++; 473 474 return (nseg); 475 } 476 477 static inline void 478 write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen, 479 unsigned int plen, uint8_t credits, int shove, int ulp_mode) 480 { 481 struct fw_ofld_tx_data_wr *txwr = dst; 482 unsigned int wr_ulp_mode; 483 484 txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) | 485 V_FW_WR_IMMDLEN(immdlen)); 486 txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) | 487 V_FW_WR_LEN16(credits)); 488 489 /* for iscsi, the mode & submode setting is per-packet */ 490 if (toep->ulp_mode == ULP_MODE_ISCSI) 491 wr_ulp_mode = V_FW_OFLD_TX_DATA_WR_ULPMODE(ulp_mode >> 4) | 492 V_FW_OFLD_TX_DATA_WR_ULPSUBMODE(ulp_mode & 3); 493 else 494 wr_ulp_mode = V_FW_OFLD_TX_DATA_WR_ULPMODE(toep->ulp_mode); 495 496 txwr->lsodisable_to_proxy = 497 htobe32(wr_ulp_mode | 498 V_FW_OFLD_TX_DATA_WR_URGENT(0) | /* XXX */ 499 V_FW_OFLD_TX_DATA_WR_SHOVE(shove)); 500 txwr->plen = htobe32(plen); 501 } 502 503 /* 504 * Generate a DSGL from a starting mbuf. The total number of segments and the 505 * maximum segments in any one mbuf are provided. 506 */ 507 static void 508 write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n) 509 { 510 struct mbuf *m; 511 struct ulptx_sgl *usgl = dst; 512 int i, j, rc; 513 struct sglist sg; 514 struct sglist_seg segs[n]; 515 516 KASSERT(nsegs > 0, ("%s: nsegs 0", __func__)); 517 518 sglist_init(&sg, n, segs); 519 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 520 V_ULPTX_NSGE(nsegs)); 521 522 i = -1; 523 for (m = start; m != stop; m = m->m_next) { 524 rc = sglist_append(&sg, mtod(m, void *), m->m_len); 525 if (__predict_false(rc != 0)) 526 panic("%s: sglist_append %d", __func__, rc); 527 528 for (j = 0; j < sg.sg_nseg; i++, j++) { 529 if (i < 0) { 530 usgl->len0 = htobe32(segs[j].ss_len); 531 usgl->addr0 = htobe64(segs[j].ss_paddr); 532 } else { 533 usgl->sge[i / 2].len[i & 1] = 534 htobe32(segs[j].ss_len); 535 usgl->sge[i / 2].addr[i & 1] = 536 htobe64(segs[j].ss_paddr); 537 } 538 #ifdef INVARIANTS 539 nsegs--; 540 #endif 541 } 542 sglist_reset(&sg); 543 } 544 if (i & 1) 545 usgl->sge[i / 2].len[1] = htobe32(0); 546 KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p", 547 __func__, nsegs, start, stop)); 548 } 549 550 /* 551 * Max number of SGL entries an offload tx work request can have. This is 41 552 * (1 + 40) for a full 512B work request. 553 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40) 554 */ 555 #define OFLD_SGL_LEN (41) 556 557 /* 558 * Send data and/or a FIN to the peer. 559 * 560 * The socket's so_snd buffer consists of a stream of data starting with sb_mb 561 * and linked together with m_next. sb_sndptr, if set, is the last mbuf that 562 * was transmitted. 563 * 564 * drop indicates the number of bytes that should be dropped from the head of 565 * the send buffer. It is an optimization that lets do_fw4_ack avoid creating 566 * contention on the send buffer lock (before this change it used to do 567 * sowwakeup and then t4_push_frames right after that when recovering from tx 568 * stalls). When drop is set this function MUST drop the bytes and wake up any 569 * writers. 570 */ 571 void 572 t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop) 573 { 574 struct mbuf *sndptr, *m, *sb_sndptr; 575 struct fw_ofld_tx_data_wr *txwr; 576 struct wrqe *wr; 577 u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf; 578 struct inpcb *inp = toep->inp; 579 struct tcpcb *tp = intotcpcb(inp); 580 struct socket *so = inp->inp_socket; 581 struct sockbuf *sb = &so->so_snd; 582 int tx_credits, shove, compl, space, sowwakeup; 583 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 584 585 INP_WLOCK_ASSERT(inp); 586 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 587 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); 588 589 KASSERT(toep->ulp_mode == ULP_MODE_NONE || 590 toep->ulp_mode == ULP_MODE_TCPDDP || 591 toep->ulp_mode == ULP_MODE_RDMA, 592 ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep)); 593 594 /* 595 * This function doesn't resume by itself. Someone else must clear the 596 * flag and call this function. 597 */ 598 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { 599 KASSERT(drop == 0, 600 ("%s: drop (%d) != 0 but tx is suspended", __func__, drop)); 601 return; 602 } 603 604 do { 605 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); 606 max_imm = max_imm_payload(tx_credits); 607 max_nsegs = max_dsgl_nsegs(tx_credits); 608 609 SOCKBUF_LOCK(sb); 610 sowwakeup = drop; 611 if (drop) { 612 sbdrop_locked(sb, drop); 613 drop = 0; 614 } 615 sb_sndptr = sb->sb_sndptr; 616 sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb; 617 plen = 0; 618 nsegs = 0; 619 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ 620 for (m = sndptr; m != NULL; m = m->m_next) { 621 int n = sglist_count(mtod(m, void *), m->m_len); 622 623 nsegs += n; 624 plen += m->m_len; 625 626 /* This mbuf sent us _over_ the nsegs limit, back out */ 627 if (plen > max_imm && nsegs > max_nsegs) { 628 nsegs -= n; 629 plen -= m->m_len; 630 if (plen == 0) { 631 /* Too few credits */ 632 toep->flags |= TPF_TX_SUSPENDED; 633 if (sowwakeup) 634 sowwakeup_locked(so); 635 else 636 SOCKBUF_UNLOCK(sb); 637 SOCKBUF_UNLOCK_ASSERT(sb); 638 return; 639 } 640 break; 641 } 642 643 if (max_nsegs_1mbuf < n) 644 max_nsegs_1mbuf = n; 645 sb_sndptr = m; /* new sb->sb_sndptr if all goes well */ 646 647 /* This mbuf put us right at the max_nsegs limit */ 648 if (plen > max_imm && nsegs == max_nsegs) { 649 m = m->m_next; 650 break; 651 } 652 } 653 654 shove = m == NULL && !(tp->t_flags & TF_MORETOCOME); 655 space = sbspace(sb); 656 657 if (space <= sb->sb_hiwat * 3 / 8 && 658 toep->plen_nocompl + plen >= sb->sb_hiwat / 4) 659 compl = 1; 660 else 661 compl = 0; 662 663 if (sb->sb_flags & SB_AUTOSIZE && 664 V_tcp_do_autosndbuf && 665 sb->sb_hiwat < V_tcp_autosndbuf_max && 666 space < sb->sb_hiwat / 8) { 667 int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc, 668 V_tcp_autosndbuf_max); 669 670 if (!sbreserve_locked(sb, newsize, so, NULL)) 671 sb->sb_flags &= ~SB_AUTOSIZE; 672 else 673 sowwakeup = 1; /* room available */ 674 } 675 if (sowwakeup) 676 sowwakeup_locked(so); 677 else 678 SOCKBUF_UNLOCK(sb); 679 SOCKBUF_UNLOCK_ASSERT(sb); 680 681 /* nothing to send */ 682 if (plen == 0) { 683 KASSERT(m == NULL, 684 ("%s: nothing to send, but m != NULL", __func__)); 685 break; 686 } 687 688 if (__predict_false(toep->flags & TPF_FIN_SENT)) 689 panic("%s: excess tx.", __func__); 690 691 if (plen <= max_imm) { 692 693 /* Immediate data tx */ 694 695 wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), 696 toep->ofld_txq); 697 if (wr == NULL) { 698 /* XXX: how will we recover from this? */ 699 toep->flags |= TPF_TX_SUSPENDED; 700 return; 701 } 702 txwr = wrtod(wr); 703 credits = howmany(wr->wr_len, 16); 704 write_tx_wr(txwr, toep, plen, plen, credits, shove, 0); 705 m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); 706 nsegs = 0; 707 } else { 708 int wr_len; 709 710 /* DSGL tx */ 711 712 wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + 713 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 714 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq); 715 if (wr == NULL) { 716 /* XXX: how will we recover from this? */ 717 toep->flags |= TPF_TX_SUSPENDED; 718 return; 719 } 720 txwr = wrtod(wr); 721 credits = howmany(wr_len, 16); 722 write_tx_wr(txwr, toep, 0, plen, credits, shove, 0); 723 write_tx_sgl(txwr + 1, sndptr, m, nsegs, 724 max_nsegs_1mbuf); 725 if (wr_len & 0xf) { 726 uint64_t *pad = (uint64_t *) 727 ((uintptr_t)txwr + wr_len); 728 *pad = 0; 729 } 730 } 731 732 KASSERT(toep->tx_credits >= credits, 733 ("%s: not enough credits", __func__)); 734 735 toep->tx_credits -= credits; 736 toep->tx_nocompl += credits; 737 toep->plen_nocompl += plen; 738 if (toep->tx_credits <= toep->tx_total * 3 / 8 && 739 toep->tx_nocompl >= toep->tx_total / 4) 740 compl = 1; 741 742 if (compl || toep->ulp_mode == ULP_MODE_RDMA) { 743 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); 744 toep->tx_nocompl = 0; 745 toep->plen_nocompl = 0; 746 } 747 748 tp->snd_nxt += plen; 749 tp->snd_max += plen; 750 751 SOCKBUF_LOCK(sb); 752 KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__)); 753 sb->sb_sndptr = sb_sndptr; 754 SOCKBUF_UNLOCK(sb); 755 756 toep->flags |= TPF_TX_DATA_SENT; 757 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) 758 toep->flags |= TPF_TX_SUSPENDED; 759 760 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 761 txsd->plen = plen; 762 txsd->tx_credits = credits; 763 txsd++; 764 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 765 toep->txsd_pidx = 0; 766 txsd = &toep->txsd[0]; 767 } 768 toep->txsd_avail--; 769 770 t4_l2t_send(sc, wr, toep->l2te); 771 } while (m != NULL); 772 773 /* Send a FIN if requested, but only if there's no more data to send */ 774 if (m == NULL && toep->flags & TPF_SEND_FIN) 775 close_conn(sc, toep); 776 } 777 778 /* Send ULP data over TOE using TX_DATA_WR. We send whole mbuf at once */ 779 void 780 t4_ulp_push_frames(struct adapter *sc, struct toepcb *toep, int drop) 781 { 782 struct mbuf *sndptr, *m = NULL; 783 struct fw_ofld_tx_data_wr *txwr; 784 struct wrqe *wr; 785 unsigned int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf; 786 struct inpcb *inp = toep->inp; 787 struct tcpcb *tp; 788 struct socket *so; 789 struct sockbuf *sb; 790 int tx_credits, ulp_len = 0, ulp_mode = 0, qlen = 0; 791 int shove, compl; 792 struct ofld_tx_sdesc *txsd; 793 794 INP_WLOCK_ASSERT(inp); 795 if (toep->flags & TPF_ABORT_SHUTDOWN) 796 return; 797 798 tp = intotcpcb(inp); 799 so = inp->inp_socket; 800 sb = &so->so_snd; 801 txsd = &toep->txsd[toep->txsd_pidx]; 802 803 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 804 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); 805 806 /* 807 * This function doesn't resume by itself. Someone else must clear the 808 * flag and call this function. 809 */ 810 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) 811 return; 812 813 sndptr = t4_queue_iscsi_callback(so, toep, 1, &qlen); 814 if (!qlen) 815 return; 816 817 do { 818 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); 819 max_imm = max_imm_payload(tx_credits); 820 max_nsegs = max_dsgl_nsegs(tx_credits); 821 822 if (drop) { 823 t4_cpl_iscsi_callback(toep->td, toep, &drop, 824 CPL_FW4_ACK); 825 drop = 0; 826 } 827 828 plen = 0; 829 nsegs = 0; 830 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ 831 for (m = sndptr; m != NULL; m = m->m_next) { 832 int n = sglist_count(mtod(m, void *), m->m_len); 833 834 nsegs += n; 835 plen += m->m_len; 836 837 /* This mbuf sent us _over_ the nsegs limit, return */ 838 if (plen > max_imm && nsegs > max_nsegs) { 839 toep->flags |= TPF_TX_SUSPENDED; 840 return; 841 } 842 843 if (max_nsegs_1mbuf < n) 844 max_nsegs_1mbuf = n; 845 846 /* This mbuf put us right at the max_nsegs limit */ 847 if (plen > max_imm && nsegs == max_nsegs) { 848 toep->flags |= TPF_TX_SUSPENDED; 849 return; 850 } 851 } 852 853 shove = m == NULL && !(tp->t_flags & TF_MORETOCOME); 854 /* nothing to send */ 855 if (plen == 0) { 856 KASSERT(m == NULL, 857 ("%s: nothing to send, but m != NULL", __func__)); 858 break; 859 } 860 861 if (__predict_false(toep->flags & TPF_FIN_SENT)) 862 panic("%s: excess tx.", __func__); 863 864 ulp_len = plen + ulp_extra_len(sndptr, &ulp_mode); 865 if (plen <= max_imm) { 866 867 /* Immediate data tx */ 868 wr = alloc_wrqe(roundup(sizeof(*txwr) + plen, 16), 869 toep->ofld_txq); 870 if (wr == NULL) { 871 /* XXX: how will we recover from this? */ 872 toep->flags |= TPF_TX_SUSPENDED; 873 return; 874 } 875 txwr = wrtod(wr); 876 credits = howmany(wr->wr_len, 16); 877 write_tx_wr(txwr, toep, plen, ulp_len, credits, shove, 878 ulp_mode); 879 m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); 880 } else { 881 int wr_len; 882 883 /* DSGL tx */ 884 wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + 885 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 886 wr = alloc_wrqe(roundup(wr_len, 16), toep->ofld_txq); 887 if (wr == NULL) { 888 /* XXX: how will we recover from this? */ 889 toep->flags |= TPF_TX_SUSPENDED; 890 return; 891 } 892 txwr = wrtod(wr); 893 credits = howmany(wr_len, 16); 894 write_tx_wr(txwr, toep, 0, ulp_len, credits, shove, 895 ulp_mode); 896 write_tx_sgl(txwr + 1, sndptr, m, nsegs, 897 max_nsegs_1mbuf); 898 if (wr_len & 0xf) { 899 uint64_t *pad = (uint64_t *) 900 ((uintptr_t)txwr + wr_len); 901 *pad = 0; 902 } 903 } 904 905 KASSERT(toep->tx_credits >= credits, 906 ("%s: not enough credits", __func__)); 907 908 toep->tx_credits -= credits; 909 toep->tx_nocompl += credits; 910 toep->plen_nocompl += plen; 911 if (toep->tx_credits <= toep->tx_total * 3 / 8 && 912 toep->tx_nocompl >= toep->tx_total / 4) 913 compl = 1; 914 915 if (compl) { 916 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); 917 toep->tx_nocompl = 0; 918 toep->plen_nocompl = 0; 919 } 920 tp->snd_nxt += ulp_len; 921 tp->snd_max += ulp_len; 922 923 /* goto next mbuf */ 924 sndptr = m = t4_queue_iscsi_callback(so, toep, 2, &qlen); 925 926 toep->flags |= TPF_TX_DATA_SENT; 927 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) { 928 toep->flags |= TPF_TX_SUSPENDED; 929 } 930 931 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 932 txsd->plen = plen; 933 txsd->tx_credits = credits; 934 txsd++; 935 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 936 toep->txsd_pidx = 0; 937 txsd = &toep->txsd[0]; 938 } 939 toep->txsd_avail--; 940 941 t4_l2t_send(sc, wr, toep->l2te); 942 } while (m != NULL); 943 944 /* Send a FIN if requested, but only if there's no more data to send */ 945 if (m == NULL && toep->flags & TPF_SEND_FIN) 946 close_conn(sc, toep); 947 } 948 949 int 950 t4_tod_output(struct toedev *tod, struct tcpcb *tp) 951 { 952 struct adapter *sc = tod->tod_softc; 953 #ifdef INVARIANTS 954 struct inpcb *inp = tp->t_inpcb; 955 #endif 956 struct toepcb *toep = tp->t_toe; 957 958 INP_WLOCK_ASSERT(inp); 959 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 960 ("%s: inp %p dropped.", __func__, inp)); 961 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 962 963 t4_push_frames(sc, toep, 0); 964 965 return (0); 966 } 967 968 int 969 t4_send_fin(struct toedev *tod, struct tcpcb *tp) 970 { 971 struct adapter *sc = tod->tod_softc; 972 #ifdef INVARIANTS 973 struct inpcb *inp = tp->t_inpcb; 974 #endif 975 struct toepcb *toep = tp->t_toe; 976 977 INP_WLOCK_ASSERT(inp); 978 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 979 ("%s: inp %p dropped.", __func__, inp)); 980 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 981 982 toep->flags |= TPF_SEND_FIN; 983 if (tp->t_state >= TCPS_ESTABLISHED) { 984 if (toep->ulp_mode == ULP_MODE_ISCSI) 985 t4_ulp_push_frames(sc, toep, 0); 986 else 987 t4_push_frames(sc, toep, 0); 988 } 989 990 return (0); 991 } 992 993 int 994 t4_send_rst(struct toedev *tod, struct tcpcb *tp) 995 { 996 struct adapter *sc = tod->tod_softc; 997 #if defined(INVARIANTS) 998 struct inpcb *inp = tp->t_inpcb; 999 #endif 1000 struct toepcb *toep = tp->t_toe; 1001 1002 INP_WLOCK_ASSERT(inp); 1003 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1004 ("%s: inp %p dropped.", __func__, inp)); 1005 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1006 1007 /* hmmmm */ 1008 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 1009 ("%s: flowc for tid %u [%s] not sent already", 1010 __func__, toep->tid, tcpstates[tp->t_state])); 1011 1012 send_reset(sc, toep, 0); 1013 return (0); 1014 } 1015 1016 /* 1017 * Peer has sent us a FIN. 1018 */ 1019 static int 1020 do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1021 { 1022 struct adapter *sc = iq->adapter; 1023 const struct cpl_peer_close *cpl = (const void *)(rss + 1); 1024 unsigned int tid = GET_TID(cpl); 1025 struct toepcb *toep = lookup_tid(sc, tid); 1026 struct inpcb *inp = toep->inp; 1027 struct tcpcb *tp = NULL; 1028 struct socket *so; 1029 struct sockbuf *sb; 1030 #ifdef INVARIANTS 1031 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1032 #endif 1033 1034 KASSERT(opcode == CPL_PEER_CLOSE, 1035 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1036 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1037 1038 if (__predict_false(toep->flags & TPF_SYNQE)) { 1039 #ifdef INVARIANTS 1040 struct synq_entry *synqe = (void *)toep; 1041 1042 INP_WLOCK(synqe->lctx->inp); 1043 if (synqe->flags & TPF_SYNQE_HAS_L2TE) { 1044 KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, 1045 ("%s: listen socket closed but tid %u not aborted.", 1046 __func__, tid)); 1047 } else { 1048 /* 1049 * do_pass_accept_req is still running and will 1050 * eventually take care of this tid. 1051 */ 1052 } 1053 INP_WUNLOCK(synqe->lctx->inp); 1054 #endif 1055 CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, 1056 toep, toep->flags); 1057 return (0); 1058 } 1059 1060 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1061 1062 INP_INFO_WLOCK(&V_tcbinfo); 1063 INP_WLOCK(inp); 1064 tp = intotcpcb(inp); 1065 1066 CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__, 1067 tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp); 1068 1069 if (toep->flags & TPF_ABORT_SHUTDOWN) 1070 goto done; 1071 1072 tp->rcv_nxt++; /* FIN */ 1073 1074 so = inp->inp_socket; 1075 sb = &so->so_rcv; 1076 SOCKBUF_LOCK(sb); 1077 if (__predict_false(toep->ddp_flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) { 1078 m = get_ddp_mbuf(be32toh(cpl->rcv_nxt) - tp->rcv_nxt); 1079 tp->rcv_nxt = be32toh(cpl->rcv_nxt); 1080 toep->ddp_flags &= ~(DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE); 1081 1082 KASSERT(toep->sb_cc >= sbused(sb), 1083 ("%s: sb %p has more data (%d) than last time (%d).", 1084 __func__, sb, sbused(sb), toep->sb_cc)); 1085 toep->rx_credits += toep->sb_cc - sbused(sb); 1086 #ifdef USE_DDP_RX_FLOW_CONTROL 1087 toep->rx_credits -= m->m_len; /* adjust for F_RX_FC_DDP */ 1088 #endif 1089 sbappendstream_locked(sb, m, 0); 1090 toep->sb_cc = sbused(sb); 1091 } 1092 socantrcvmore_locked(so); /* unlocks the sockbuf */ 1093 1094 if (toep->ulp_mode != ULP_MODE_RDMA) { 1095 KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt), 1096 ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt, 1097 be32toh(cpl->rcv_nxt))); 1098 } 1099 1100 switch (tp->t_state) { 1101 case TCPS_SYN_RECEIVED: 1102 tp->t_starttime = ticks; 1103 /* FALLTHROUGH */ 1104 1105 case TCPS_ESTABLISHED: 1106 tp->t_state = TCPS_CLOSE_WAIT; 1107 break; 1108 1109 case TCPS_FIN_WAIT_1: 1110 tp->t_state = TCPS_CLOSING; 1111 break; 1112 1113 case TCPS_FIN_WAIT_2: 1114 tcp_twstart(tp); 1115 INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ 1116 INP_INFO_WUNLOCK(&V_tcbinfo); 1117 1118 INP_WLOCK(inp); 1119 final_cpl_received(toep); 1120 return (0); 1121 1122 default: 1123 log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n", 1124 __func__, tid, tp->t_state); 1125 } 1126 done: 1127 INP_WUNLOCK(inp); 1128 INP_INFO_WUNLOCK(&V_tcbinfo); 1129 return (0); 1130 } 1131 1132 /* 1133 * Peer has ACK'd our FIN. 1134 */ 1135 static int 1136 do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss, 1137 struct mbuf *m) 1138 { 1139 struct adapter *sc = iq->adapter; 1140 const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1); 1141 unsigned int tid = GET_TID(cpl); 1142 struct toepcb *toep = lookup_tid(sc, tid); 1143 struct inpcb *inp = toep->inp; 1144 struct tcpcb *tp = NULL; 1145 struct socket *so = NULL; 1146 #ifdef INVARIANTS 1147 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1148 #endif 1149 1150 KASSERT(opcode == CPL_CLOSE_CON_RPL, 1151 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1152 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1153 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1154 1155 INP_INFO_WLOCK(&V_tcbinfo); 1156 INP_WLOCK(inp); 1157 tp = intotcpcb(inp); 1158 1159 CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x", 1160 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags); 1161 1162 if (toep->flags & TPF_ABORT_SHUTDOWN) 1163 goto done; 1164 1165 so = inp->inp_socket; 1166 tp->snd_una = be32toh(cpl->snd_nxt) - 1; /* exclude FIN */ 1167 1168 switch (tp->t_state) { 1169 case TCPS_CLOSING: /* see TCPS_FIN_WAIT_2 in do_peer_close too */ 1170 tcp_twstart(tp); 1171 release: 1172 INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ 1173 INP_INFO_WUNLOCK(&V_tcbinfo); 1174 1175 INP_WLOCK(inp); 1176 final_cpl_received(toep); /* no more CPLs expected */ 1177 1178 return (0); 1179 case TCPS_LAST_ACK: 1180 if (tcp_close(tp)) 1181 INP_WUNLOCK(inp); 1182 goto release; 1183 1184 case TCPS_FIN_WAIT_1: 1185 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 1186 soisdisconnected(so); 1187 tp->t_state = TCPS_FIN_WAIT_2; 1188 break; 1189 1190 default: 1191 log(LOG_ERR, 1192 "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n", 1193 __func__, tid, tcpstates[tp->t_state]); 1194 } 1195 done: 1196 INP_WUNLOCK(inp); 1197 INP_INFO_WUNLOCK(&V_tcbinfo); 1198 return (0); 1199 } 1200 1201 void 1202 send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid, 1203 int rst_status) 1204 { 1205 struct wrqe *wr; 1206 struct cpl_abort_rpl *cpl; 1207 1208 wr = alloc_wrqe(sizeof(*cpl), ofld_txq); 1209 if (wr == NULL) { 1210 /* XXX */ 1211 panic("%s: allocation failure.", __func__); 1212 } 1213 cpl = wrtod(wr); 1214 1215 INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid); 1216 cpl->cmd = rst_status; 1217 1218 t4_wrq_tx(sc, wr); 1219 } 1220 1221 static int 1222 abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason) 1223 { 1224 switch (abort_reason) { 1225 case CPL_ERR_BAD_SYN: 1226 case CPL_ERR_CONN_RESET: 1227 return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET); 1228 case CPL_ERR_XMIT_TIMEDOUT: 1229 case CPL_ERR_PERSIST_TIMEDOUT: 1230 case CPL_ERR_FINWAIT2_TIMEDOUT: 1231 case CPL_ERR_KEEPALIVE_TIMEDOUT: 1232 return (ETIMEDOUT); 1233 default: 1234 return (EIO); 1235 } 1236 } 1237 1238 int 1239 cpl_not_handled(struct sge_iq *, const struct rss_header *, struct mbuf *); 1240 /* 1241 * tom_cpl_iscsi_callback - 1242 * iscsi and tom would share the following cpl messages, so when any of these 1243 * message is received, after tom is done with processing it, the messages 1244 * needs to be forwarded to iscsi for further processing: 1245 * - CPL_SET_TCB_RPL 1246 * - CPL_RX_DATA_DDP 1247 */ 1248 void (*tom_cpl_iscsi_callback)(struct tom_data *, struct socket *, void *, 1249 unsigned int); 1250 1251 struct mbuf *(*tom_queue_iscsi_callback)(struct socket *, unsigned int, int *); 1252 /* 1253 * Check if the handler function is set for a given CPL 1254 * return 0 if the function is NULL or cpl_not_handled, 1 otherwise. 1255 */ 1256 int 1257 t4tom_cpl_handler_registered(struct adapter *sc, unsigned int opcode) 1258 { 1259 1260 MPASS(opcode < nitems(sc->cpl_handler)); 1261 1262 return (sc->cpl_handler[opcode] && 1263 sc->cpl_handler[opcode] != cpl_not_handled); 1264 } 1265 1266 /* 1267 * set the tom_cpl_iscsi_callback function, this function should be used 1268 * whenever both toe and iscsi need to process the same cpl msg. 1269 */ 1270 void 1271 t4tom_register_cpl_iscsi_callback(void (*fp)(struct tom_data *, struct socket *, 1272 void *, unsigned int)) 1273 { 1274 1275 tom_cpl_iscsi_callback = fp; 1276 } 1277 1278 void 1279 t4tom_register_queue_iscsi_callback(struct mbuf *(*fp)(struct socket *, 1280 unsigned int, int *qlen)) 1281 { 1282 1283 tom_queue_iscsi_callback = fp; 1284 } 1285 1286 int 1287 t4_cpl_iscsi_callback(struct tom_data *td, struct toepcb *toep, void *m, 1288 unsigned int opcode) 1289 { 1290 struct socket *so; 1291 1292 if (opcode == CPL_FW4_ACK) 1293 so = toep->inp->inp_socket; 1294 else { 1295 INP_WLOCK(toep->inp); 1296 so = toep->inp->inp_socket; 1297 INP_WUNLOCK(toep->inp); 1298 } 1299 1300 if (tom_cpl_iscsi_callback && so) { 1301 if (toep->ulp_mode == ULP_MODE_ISCSI) { 1302 tom_cpl_iscsi_callback(td, so, m, opcode); 1303 return (0); 1304 } 1305 } 1306 1307 return (1); 1308 } 1309 1310 struct mbuf * 1311 t4_queue_iscsi_callback(struct socket *so, struct toepcb *toep, 1312 unsigned int cmd, int *qlen) 1313 { 1314 1315 if (tom_queue_iscsi_callback && so) { 1316 if (toep->ulp_mode == ULP_MODE_ISCSI) 1317 return (tom_queue_iscsi_callback(so, cmd, qlen)); 1318 } 1319 1320 return (NULL); 1321 } 1322 1323 /* 1324 * TCP RST from the peer, timeout, or some other such critical error. 1325 */ 1326 static int 1327 do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1328 { 1329 struct adapter *sc = iq->adapter; 1330 const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1); 1331 unsigned int tid = GET_TID(cpl); 1332 struct toepcb *toep = lookup_tid(sc, tid); 1333 struct sge_wrq *ofld_txq = toep->ofld_txq; 1334 struct inpcb *inp; 1335 struct tcpcb *tp; 1336 #ifdef INVARIANTS 1337 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1338 #endif 1339 1340 KASSERT(opcode == CPL_ABORT_REQ_RSS, 1341 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1342 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1343 1344 if (toep->flags & TPF_SYNQE) 1345 return (do_abort_req_synqe(iq, rss, m)); 1346 1347 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1348 1349 if (negative_advice(cpl->status)) { 1350 CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)", 1351 __func__, cpl->status, tid, toep->flags); 1352 return (0); /* Ignore negative advice */ 1353 } 1354 1355 inp = toep->inp; 1356 INP_INFO_WLOCK(&V_tcbinfo); /* for tcp_close */ 1357 INP_WLOCK(inp); 1358 1359 tp = intotcpcb(inp); 1360 1361 CTR6(KTR_CXGBE, 1362 "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d", 1363 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, 1364 inp->inp_flags, cpl->status); 1365 1366 /* 1367 * If we'd initiated an abort earlier the reply to it is responsible for 1368 * cleaning up resources. Otherwise we tear everything down right here 1369 * right now. We owe the T4 a CPL_ABORT_RPL no matter what. 1370 */ 1371 if (toep->flags & TPF_ABORT_SHUTDOWN) { 1372 INP_WUNLOCK(inp); 1373 goto done; 1374 } 1375 toep->flags |= TPF_ABORT_SHUTDOWN; 1376 1377 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 1378 struct socket *so = inp->inp_socket; 1379 1380 if (so != NULL) 1381 so_error_set(so, abort_status_to_errno(tp, 1382 cpl->status)); 1383 tp = tcp_close(tp); 1384 if (tp == NULL) 1385 INP_WLOCK(inp); /* re-acquire */ 1386 } 1387 1388 final_cpl_received(toep); 1389 done: 1390 INP_INFO_WUNLOCK(&V_tcbinfo); 1391 send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST); 1392 return (0); 1393 } 1394 1395 /* 1396 * Reply to the CPL_ABORT_REQ (send_reset) 1397 */ 1398 static int 1399 do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1400 { 1401 struct adapter *sc = iq->adapter; 1402 const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1); 1403 unsigned int tid = GET_TID(cpl); 1404 struct toepcb *toep = lookup_tid(sc, tid); 1405 struct inpcb *inp = toep->inp; 1406 #ifdef INVARIANTS 1407 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1408 #endif 1409 1410 KASSERT(opcode == CPL_ABORT_RPL_RSS, 1411 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1412 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1413 1414 if (toep->flags & TPF_SYNQE) 1415 return (do_abort_rpl_synqe(iq, rss, m)); 1416 1417 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1418 1419 CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d", 1420 __func__, tid, toep, inp, cpl->status); 1421 1422 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, 1423 ("%s: wasn't expecting abort reply", __func__)); 1424 1425 INP_WLOCK(inp); 1426 final_cpl_received(toep); 1427 1428 return (0); 1429 } 1430 1431 static int 1432 do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1433 { 1434 struct adapter *sc = iq->adapter; 1435 const struct cpl_rx_data *cpl = mtod(m, const void *); 1436 unsigned int tid = GET_TID(cpl); 1437 struct toepcb *toep = lookup_tid(sc, tid); 1438 struct inpcb *inp = toep->inp; 1439 struct tcpcb *tp; 1440 struct socket *so; 1441 struct sockbuf *sb; 1442 int len; 1443 uint32_t ddp_placed = 0; 1444 1445 if (__predict_false(toep->flags & TPF_SYNQE)) { 1446 #ifdef INVARIANTS 1447 struct synq_entry *synqe = (void *)toep; 1448 1449 INP_WLOCK(synqe->lctx->inp); 1450 if (synqe->flags & TPF_SYNQE_HAS_L2TE) { 1451 KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, 1452 ("%s: listen socket closed but tid %u not aborted.", 1453 __func__, tid)); 1454 } else { 1455 /* 1456 * do_pass_accept_req is still running and will 1457 * eventually take care of this tid. 1458 */ 1459 } 1460 INP_WUNLOCK(synqe->lctx->inp); 1461 #endif 1462 CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, 1463 toep, toep->flags); 1464 m_freem(m); 1465 return (0); 1466 } 1467 1468 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1469 1470 /* strip off CPL header */ 1471 m_adj(m, sizeof(*cpl)); 1472 len = m->m_pkthdr.len; 1473 1474 INP_WLOCK(inp); 1475 if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) { 1476 CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x", 1477 __func__, tid, len, inp->inp_flags); 1478 INP_WUNLOCK(inp); 1479 m_freem(m); 1480 return (0); 1481 } 1482 1483 tp = intotcpcb(inp); 1484 1485 if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq))) 1486 ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt; 1487 1488 tp->rcv_nxt += len; 1489 KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__)); 1490 tp->rcv_wnd -= len; 1491 tp->t_rcvtime = ticks; 1492 1493 so = inp_inpcbtosocket(inp); 1494 sb = &so->so_rcv; 1495 SOCKBUF_LOCK(sb); 1496 1497 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) { 1498 CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)", 1499 __func__, tid, len); 1500 m_freem(m); 1501 SOCKBUF_UNLOCK(sb); 1502 INP_WUNLOCK(inp); 1503 1504 INP_INFO_WLOCK(&V_tcbinfo); 1505 INP_WLOCK(inp); 1506 tp = tcp_drop(tp, ECONNRESET); 1507 if (tp) 1508 INP_WUNLOCK(inp); 1509 INP_INFO_WUNLOCK(&V_tcbinfo); 1510 1511 return (0); 1512 } 1513 1514 /* receive buffer autosize */ 1515 if (sb->sb_flags & SB_AUTOSIZE && 1516 V_tcp_do_autorcvbuf && 1517 sb->sb_hiwat < V_tcp_autorcvbuf_max && 1518 len > (sbspace(sb) / 8 * 7)) { 1519 unsigned int hiwat = sb->sb_hiwat; 1520 unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc, 1521 V_tcp_autorcvbuf_max); 1522 1523 if (!sbreserve_locked(sb, newsize, so, NULL)) 1524 sb->sb_flags &= ~SB_AUTOSIZE; 1525 else 1526 toep->rx_credits += newsize - hiwat; 1527 } 1528 1529 if (toep->ulp_mode == ULP_MODE_TCPDDP) { 1530 int changed = !(toep->ddp_flags & DDP_ON) ^ cpl->ddp_off; 1531 1532 if (changed) { 1533 if (toep->ddp_flags & DDP_SC_REQ) 1534 toep->ddp_flags ^= DDP_ON | DDP_SC_REQ; 1535 else { 1536 KASSERT(cpl->ddp_off == 1, 1537 ("%s: DDP switched on by itself.", 1538 __func__)); 1539 1540 /* Fell out of DDP mode */ 1541 toep->ddp_flags &= ~(DDP_ON | DDP_BUF0_ACTIVE | 1542 DDP_BUF1_ACTIVE); 1543 1544 if (ddp_placed) 1545 insert_ddp_data(toep, ddp_placed); 1546 } 1547 } 1548 1549 if ((toep->ddp_flags & DDP_OK) == 0 && 1550 time_uptime >= toep->ddp_disabled + DDP_RETRY_WAIT) { 1551 toep->ddp_score = DDP_LOW_SCORE; 1552 toep->ddp_flags |= DDP_OK; 1553 CTR3(KTR_CXGBE, "%s: tid %u DDP_OK @ %u", 1554 __func__, tid, time_uptime); 1555 } 1556 1557 if (toep->ddp_flags & DDP_ON) { 1558 1559 /* 1560 * CPL_RX_DATA with DDP on can only be an indicate. Ask 1561 * soreceive to post a buffer or disable DDP. The 1562 * payload that arrived in this indicate is appended to 1563 * the socket buffer as usual. 1564 */ 1565 1566 #if 0 1567 CTR5(KTR_CXGBE, 1568 "%s: tid %u (0x%x) DDP indicate (seq 0x%x, len %d)", 1569 __func__, tid, toep->flags, be32toh(cpl->seq), len); 1570 #endif 1571 sb->sb_flags |= SB_DDP_INDICATE; 1572 } else if ((toep->ddp_flags & (DDP_OK|DDP_SC_REQ)) == DDP_OK && 1573 tp->rcv_wnd > DDP_RSVD_WIN && len >= sc->tt.ddp_thres) { 1574 1575 /* 1576 * DDP allowed but isn't on (and a request to switch it 1577 * on isn't pending either), and conditions are ripe for 1578 * it to work. Switch it on. 1579 */ 1580 1581 enable_ddp(sc, toep); 1582 } 1583 } 1584 1585 KASSERT(toep->sb_cc >= sbused(sb), 1586 ("%s: sb %p has more data (%d) than last time (%d).", 1587 __func__, sb, sbused(sb), toep->sb_cc)); 1588 toep->rx_credits += toep->sb_cc - sbused(sb); 1589 sbappendstream_locked(sb, m, 0); 1590 toep->sb_cc = sbused(sb); 1591 sorwakeup_locked(so); 1592 SOCKBUF_UNLOCK_ASSERT(sb); 1593 1594 INP_WUNLOCK(inp); 1595 return (0); 1596 } 1597 1598 #define S_CPL_FW4_ACK_OPCODE 24 1599 #define M_CPL_FW4_ACK_OPCODE 0xff 1600 #define V_CPL_FW4_ACK_OPCODE(x) ((x) << S_CPL_FW4_ACK_OPCODE) 1601 #define G_CPL_FW4_ACK_OPCODE(x) \ 1602 (((x) >> S_CPL_FW4_ACK_OPCODE) & M_CPL_FW4_ACK_OPCODE) 1603 1604 #define S_CPL_FW4_ACK_FLOWID 0 1605 #define M_CPL_FW4_ACK_FLOWID 0xffffff 1606 #define V_CPL_FW4_ACK_FLOWID(x) ((x) << S_CPL_FW4_ACK_FLOWID) 1607 #define G_CPL_FW4_ACK_FLOWID(x) \ 1608 (((x) >> S_CPL_FW4_ACK_FLOWID) & M_CPL_FW4_ACK_FLOWID) 1609 1610 #define S_CPL_FW4_ACK_CR 24 1611 #define M_CPL_FW4_ACK_CR 0xff 1612 #define V_CPL_FW4_ACK_CR(x) ((x) << S_CPL_FW4_ACK_CR) 1613 #define G_CPL_FW4_ACK_CR(x) (((x) >> S_CPL_FW4_ACK_CR) & M_CPL_FW4_ACK_CR) 1614 1615 #define S_CPL_FW4_ACK_SEQVAL 0 1616 #define M_CPL_FW4_ACK_SEQVAL 0x1 1617 #define V_CPL_FW4_ACK_SEQVAL(x) ((x) << S_CPL_FW4_ACK_SEQVAL) 1618 #define G_CPL_FW4_ACK_SEQVAL(x) \ 1619 (((x) >> S_CPL_FW4_ACK_SEQVAL) & M_CPL_FW4_ACK_SEQVAL) 1620 #define F_CPL_FW4_ACK_SEQVAL V_CPL_FW4_ACK_SEQVAL(1U) 1621 1622 static int 1623 do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1624 { 1625 struct adapter *sc = iq->adapter; 1626 const struct cpl_fw4_ack *cpl = (const void *)(rss + 1); 1627 unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl))); 1628 struct toepcb *toep = lookup_tid(sc, tid); 1629 struct inpcb *inp; 1630 struct tcpcb *tp; 1631 struct socket *so; 1632 uint8_t credits = cpl->credits; 1633 struct ofld_tx_sdesc *txsd; 1634 int plen; 1635 #ifdef INVARIANTS 1636 unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl))); 1637 #endif 1638 1639 /* 1640 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and 1641 * now this comes back carrying the credits for the flowc. 1642 */ 1643 if (__predict_false(toep->flags & TPF_SYNQE)) { 1644 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, 1645 ("%s: credits for a synq entry %p", __func__, toep)); 1646 return (0); 1647 } 1648 1649 inp = toep->inp; 1650 1651 KASSERT(opcode == CPL_FW4_ACK, 1652 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1653 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1654 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1655 1656 INP_WLOCK(inp); 1657 1658 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) { 1659 INP_WUNLOCK(inp); 1660 return (0); 1661 } 1662 1663 KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0, 1664 ("%s: inp_flags 0x%x", __func__, inp->inp_flags)); 1665 1666 tp = intotcpcb(inp); 1667 1668 if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) { 1669 tcp_seq snd_una = be32toh(cpl->snd_una); 1670 1671 #ifdef INVARIANTS 1672 if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) { 1673 log(LOG_ERR, 1674 "%s: unexpected seq# %x for TID %u, snd_una %x\n", 1675 __func__, snd_una, toep->tid, tp->snd_una); 1676 } 1677 #endif 1678 1679 if (tp->snd_una != snd_una) { 1680 tp->snd_una = snd_una; 1681 tp->ts_recent_age = tcp_ts_getticks(); 1682 } 1683 } 1684 1685 so = inp->inp_socket; 1686 txsd = &toep->txsd[toep->txsd_cidx]; 1687 plen = 0; 1688 while (credits) { 1689 KASSERT(credits >= txsd->tx_credits, 1690 ("%s: too many (or partial) credits", __func__)); 1691 credits -= txsd->tx_credits; 1692 toep->tx_credits += txsd->tx_credits; 1693 plen += txsd->plen; 1694 txsd++; 1695 toep->txsd_avail++; 1696 KASSERT(toep->txsd_avail <= toep->txsd_total, 1697 ("%s: txsd avail > total", __func__)); 1698 if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) { 1699 txsd = &toep->txsd[0]; 1700 toep->txsd_cidx = 0; 1701 } 1702 } 1703 1704 if (toep->tx_credits == toep->tx_total) { 1705 toep->tx_nocompl = 0; 1706 toep->plen_nocompl = 0; 1707 } 1708 1709 if (toep->flags & TPF_TX_SUSPENDED && 1710 toep->tx_credits >= toep->tx_total / 4) { 1711 toep->flags &= ~TPF_TX_SUSPENDED; 1712 if (toep->ulp_mode == ULP_MODE_ISCSI) 1713 t4_ulp_push_frames(sc, toep, plen); 1714 else 1715 t4_push_frames(sc, toep, plen); 1716 } else if (plen > 0) { 1717 struct sockbuf *sb = &so->so_snd; 1718 1719 if (toep->ulp_mode == ULP_MODE_ISCSI) 1720 t4_cpl_iscsi_callback(toep->td, toep, &plen, 1721 CPL_FW4_ACK); 1722 else { 1723 SOCKBUF_LOCK(sb); 1724 sbdrop_locked(sb, plen); 1725 sowwakeup_locked(so); 1726 SOCKBUF_UNLOCK_ASSERT(sb); 1727 } 1728 } 1729 1730 INP_WUNLOCK(inp); 1731 1732 return (0); 1733 } 1734 1735 static int 1736 do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1737 { 1738 struct adapter *sc = iq->adapter; 1739 const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1); 1740 unsigned int tid = GET_TID(cpl); 1741 #ifdef INVARIANTS 1742 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1743 #endif 1744 1745 KASSERT(opcode == CPL_SET_TCB_RPL, 1746 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1747 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1748 1749 if (is_ftid(sc, tid)) 1750 return (t4_filter_rpl(iq, rss, m)); /* TCB is a filter */ 1751 else { 1752 struct toepcb *toep = lookup_tid(sc, tid); 1753 1754 t4_cpl_iscsi_callback(toep->td, toep, m, CPL_SET_TCB_RPL); 1755 return (0); 1756 } 1757 1758 CXGBE_UNIMPLEMENTED(__func__); 1759 } 1760 1761 void 1762 t4_set_tcb_field(struct adapter *sc, struct toepcb *toep, int ctrl, 1763 uint16_t word, uint64_t mask, uint64_t val) 1764 { 1765 struct wrqe *wr; 1766 struct cpl_set_tcb_field *req; 1767 1768 wr = alloc_wrqe(sizeof(*req), ctrl ? toep->ctrlq : toep->ofld_txq); 1769 if (wr == NULL) { 1770 /* XXX */ 1771 panic("%s: allocation failure.", __func__); 1772 } 1773 req = wrtod(wr); 1774 1775 INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid); 1776 req->reply_ctrl = htobe16(V_NO_REPLY(1) | 1777 V_QUEUENO(toep->ofld_rxq->iq.abs_id)); 1778 req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0)); 1779 req->mask = htobe64(mask); 1780 req->val = htobe64(val); 1781 1782 t4_wrq_tx(sc, wr); 1783 } 1784 1785 void 1786 t4_init_cpl_io_handlers(struct adapter *sc) 1787 { 1788 1789 t4_register_cpl_handler(sc, CPL_PEER_CLOSE, do_peer_close); 1790 t4_register_cpl_handler(sc, CPL_CLOSE_CON_RPL, do_close_con_rpl); 1791 t4_register_cpl_handler(sc, CPL_ABORT_REQ_RSS, do_abort_req); 1792 t4_register_cpl_handler(sc, CPL_ABORT_RPL_RSS, do_abort_rpl); 1793 t4_register_cpl_handler(sc, CPL_RX_DATA, do_rx_data); 1794 t4_register_cpl_handler(sc, CPL_FW4_ACK, do_fw4_ack); 1795 t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, do_set_tcb_rpl); 1796 } 1797 1798 void 1799 t4_uninit_cpl_io_handlers(struct adapter *sc) 1800 { 1801 1802 t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl); 1803 } 1804 #endif 1805