1 /*- 2 * Copyright (c) 2012, 2015 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_inet.h" 32 33 #ifdef TCP_OFFLOAD 34 #include <sys/param.h> 35 #include <sys/aio.h> 36 #include <sys/file.h> 37 #include <sys/kernel.h> 38 #include <sys/ktr.h> 39 #include <sys/module.h> 40 #include <sys/proc.h> 41 #include <sys/protosw.h> 42 #include <sys/domain.h> 43 #include <sys/socket.h> 44 #include <sys/socketvar.h> 45 #include <sys/sglist.h> 46 #include <sys/taskqueue.h> 47 #include <netinet/in.h> 48 #include <netinet/in_pcb.h> 49 #include <netinet/ip.h> 50 #include <netinet/ip6.h> 51 #define TCPSTATES 52 #include <netinet/tcp_fsm.h> 53 #include <netinet/tcp_seq.h> 54 #include <netinet/tcp_var.h> 55 #include <netinet/toecore.h> 56 57 #include <security/mac/mac_framework.h> 58 59 #include <vm/vm.h> 60 #include <vm/vm_extern.h> 61 #include <vm/pmap.h> 62 #include <vm/vm_map.h> 63 #include <vm/vm_page.h> 64 65 #include "common/common.h" 66 #include "common/t4_msg.h" 67 #include "common/t4_regs.h" 68 #include "common/t4_tcb.h" 69 #include "tom/t4_tom_l2t.h" 70 #include "tom/t4_tom.h" 71 72 VNET_DECLARE(int, tcp_do_autosndbuf); 73 #define V_tcp_do_autosndbuf VNET(tcp_do_autosndbuf) 74 VNET_DECLARE(int, tcp_autosndbuf_inc); 75 #define V_tcp_autosndbuf_inc VNET(tcp_autosndbuf_inc) 76 VNET_DECLARE(int, tcp_autosndbuf_max); 77 #define V_tcp_autosndbuf_max VNET(tcp_autosndbuf_max) 78 VNET_DECLARE(int, tcp_do_autorcvbuf); 79 #define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf) 80 VNET_DECLARE(int, tcp_autorcvbuf_inc); 81 #define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc) 82 VNET_DECLARE(int, tcp_autorcvbuf_max); 83 #define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max) 84 85 #define IS_AIOTX_MBUF(m) \ 86 ((m)->m_flags & M_EXT && (m)->m_ext.ext_flags & EXT_FLAG_AIOTX) 87 88 static void t4_aiotx_cancel(struct kaiocb *job); 89 static void t4_aiotx_queue_toep(struct toepcb *toep); 90 91 static size_t 92 aiotx_mbuf_pgoff(struct mbuf *m) 93 { 94 struct aiotx_buffer *ab; 95 96 MPASS(IS_AIOTX_MBUF(m)); 97 ab = m->m_ext.ext_arg1; 98 return ((ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) % PAGE_SIZE); 
99 } 100 101 static vm_page_t * 102 aiotx_mbuf_pages(struct mbuf *m) 103 { 104 struct aiotx_buffer *ab; 105 int npages; 106 107 MPASS(IS_AIOTX_MBUF(m)); 108 ab = m->m_ext.ext_arg1; 109 npages = (ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) / PAGE_SIZE; 110 return (ab->ps.pages + npages); 111 } 112 113 void 114 send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp) 115 { 116 struct wrqe *wr; 117 struct fw_flowc_wr *flowc; 118 unsigned int nparams = ftxp ? 8 : 6, flowclen; 119 struct vi_info *vi = toep->vi; 120 struct port_info *pi = vi->pi; 121 struct adapter *sc = pi->adapter; 122 unsigned int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN; 123 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 124 125 KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT), 126 ("%s: flowc for tid %u sent already", __func__, toep->tid)); 127 128 flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval); 129 130 wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq); 131 if (wr == NULL) { 132 /* XXX */ 133 panic("%s: allocation failure.", __func__); 134 } 135 flowc = wrtod(wr); 136 memset(flowc, 0, wr->wr_len); 137 138 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 139 V_FW_FLOWC_WR_NPARAMS(nparams)); 140 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) | 141 V_FW_WR_FLOWID(toep->tid)); 142 143 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 144 flowc->mnemval[0].val = htobe32(pfvf); 145 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 146 flowc->mnemval[1].val = htobe32(pi->tx_chan); 147 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 148 flowc->mnemval[2].val = htobe32(pi->tx_chan); 149 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 150 flowc->mnemval[3].val = htobe32(toep->ofld_rxq->iq.abs_id); 151 if (ftxp) { 152 uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf); 153 154 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; 155 flowc->mnemval[4].val = htobe32(ftxp->snd_nxt); 156 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 157 flowc->mnemval[5].val = htobe32(ftxp->rcv_nxt); 158 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 159 flowc->mnemval[6].val = htobe32(sndbuf); 160 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 161 flowc->mnemval[7].val = htobe32(ftxp->mss); 162 163 CTR6(KTR_CXGBE, 164 "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x", 165 __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt, 166 ftxp->rcv_nxt); 167 } else { 168 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF; 169 flowc->mnemval[4].val = htobe32(512); 170 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS; 171 flowc->mnemval[5].val = htobe32(512); 172 173 CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid); 174 } 175 176 txsd->tx_credits = howmany(flowclen, 16); 177 txsd->plen = 0; 178 KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0, 179 ("%s: not enough credits (%d)", __func__, toep->tx_credits)); 180 toep->tx_credits -= txsd->tx_credits; 181 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) 182 toep->txsd_pidx = 0; 183 toep->txsd_avail--; 184 185 toep->flags |= TPF_FLOWC_WR_SENT; 186 t4_wrq_tx(sc, wr); 187 } 188 189 void 190 send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt) 191 { 192 struct wrqe *wr; 193 struct cpl_abort_req *req; 194 int tid = toep->tid; 195 struct inpcb *inp = toep->inp; 196 struct tcpcb *tp = intotcpcb(inp); /* don't use if INP_DROPPED */ 197 198 INP_WLOCK_ASSERT(inp); 199 200 CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s", 201 __func__, toep->tid, 202 inp->inp_flags 
& INP_DROPPED ? "inp dropped" : 203 tcpstates[tp->t_state], 204 toep->flags, inp->inp_flags, 205 toep->flags & TPF_ABORT_SHUTDOWN ? 206 " (abort already in progress)" : ""); 207 208 if (toep->flags & TPF_ABORT_SHUTDOWN) 209 return; /* abort already in progress */ 210 211 toep->flags |= TPF_ABORT_SHUTDOWN; 212 213 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 214 ("%s: flowc_wr not sent for tid %d.", __func__, tid)); 215 216 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 217 if (wr == NULL) { 218 /* XXX */ 219 panic("%s: allocation failure.", __func__); 220 } 221 req = wrtod(wr); 222 223 INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid); 224 if (inp->inp_flags & INP_DROPPED) 225 req->rsvd0 = htobe32(snd_nxt); 226 else 227 req->rsvd0 = htobe32(tp->snd_nxt); 228 req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT); 229 req->cmd = CPL_ABORT_SEND_RST; 230 231 /* 232 * XXX: What's the correct way to tell that the inp hasn't been detached 233 * from its socket? Should I even be flushing the snd buffer here? 234 */ 235 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 236 struct socket *so = inp->inp_socket; 237 238 if (so != NULL) /* because I'm not sure. See comment above */ 239 sbflush(&so->so_snd); 240 } 241 242 t4_l2t_send(sc, wr, toep->l2te); 243 } 244 245 /* 246 * Called when a connection is established to translate the TCP options 247 * reported by HW to FreeBSD's native format. 248 */ 249 static void 250 assign_rxopt(struct tcpcb *tp, unsigned int opt) 251 { 252 struct toepcb *toep = tp->t_toe; 253 struct inpcb *inp = tp->t_inpcb; 254 struct adapter *sc = td_adapter(toep->td); 255 int n; 256 257 INP_LOCK_ASSERT(inp); 258 259 if (inp->inp_inc.inc_flags & INC_ISIPV6) 260 n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 261 else 262 n = sizeof(struct ip) + sizeof(struct tcphdr); 263 tp->t_maxseg = sc->params.mtus[G_TCPOPT_MSS(opt)] - n; 264 265 CTR4(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u)", __func__, toep->tid, 266 G_TCPOPT_MSS(opt), sc->params.mtus[G_TCPOPT_MSS(opt)]); 267 268 if (G_TCPOPT_TSTAMP(opt)) { 269 tp->t_flags |= TF_RCVD_TSTMP; /* timestamps ok */ 270 tp->ts_recent = 0; /* hmmm */ 271 tp->ts_recent_age = tcp_ts_getticks(); 272 } 273 274 if (G_TCPOPT_SACK(opt)) 275 tp->t_flags |= TF_SACK_PERMIT; /* should already be set */ 276 else 277 tp->t_flags &= ~TF_SACK_PERMIT; /* sack disallowed by peer */ 278 279 if (G_TCPOPT_WSCALE_OK(opt)) 280 tp->t_flags |= TF_RCVD_SCALE; 281 282 /* Doing window scaling? */ 283 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 284 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 285 tp->rcv_scale = tp->request_r_scale; 286 tp->snd_scale = G_TCPOPT_SND_WSCALE(opt); 287 } 288 } 289 290 /* 291 * Completes some final bits of initialization for just established connections 292 * and changes their state to TCPS_ESTABLISHED. 293 * 294 * The ISNs are from after the exchange of SYNs. i.e., the true ISN + 1. 
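 * make_established() subtracts 1 from each value reported by the hardware to
 * recover the true ISS/IRS before seeding the sequence numbers with
 * tcp_sendseqinit() and tcp_rcvseqinit().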
295 */ 296 void 297 make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn, 298 uint16_t opt) 299 { 300 struct inpcb *inp = toep->inp; 301 struct socket *so = inp->inp_socket; 302 struct tcpcb *tp = intotcpcb(inp); 303 long bufsize; 304 uint32_t iss = be32toh(snd_isn) - 1; /* true ISS */ 305 uint32_t irs = be32toh(rcv_isn) - 1; /* true IRS */ 306 uint16_t tcpopt = be16toh(opt); 307 struct flowc_tx_params ftxp; 308 309 INP_WLOCK_ASSERT(inp); 310 KASSERT(tp->t_state == TCPS_SYN_SENT || 311 tp->t_state == TCPS_SYN_RECEIVED, 312 ("%s: TCP state %s", __func__, tcpstates[tp->t_state])); 313 314 CTR4(KTR_CXGBE, "%s: tid %d, toep %p, inp %p", 315 __func__, toep->tid, toep, inp); 316 317 tp->t_state = TCPS_ESTABLISHED; 318 tp->t_starttime = ticks; 319 TCPSTAT_INC(tcps_connects); 320 321 tp->irs = irs; 322 tcp_rcvseqinit(tp); 323 tp->rcv_wnd = toep->rx_credits << 10; 324 tp->rcv_adv += tp->rcv_wnd; 325 tp->last_ack_sent = tp->rcv_nxt; 326 327 /* 328 * If we were unable to send all rx credits via opt0, save the remainder 329 * in rx_credits so that they can be handed over with the next credit 330 * update. 331 */ 332 SOCKBUF_LOCK(&so->so_rcv); 333 bufsize = select_rcv_wnd(so); 334 SOCKBUF_UNLOCK(&so->so_rcv); 335 toep->rx_credits = bufsize - tp->rcv_wnd; 336 337 tp->iss = iss; 338 tcp_sendseqinit(tp); 339 tp->snd_una = iss + 1; 340 tp->snd_nxt = iss + 1; 341 tp->snd_max = iss + 1; 342 343 assign_rxopt(tp, tcpopt); 344 345 SOCKBUF_LOCK(&so->so_snd); 346 if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf) 347 bufsize = V_tcp_autosndbuf_max; 348 else 349 bufsize = sbspace(&so->so_snd); 350 SOCKBUF_UNLOCK(&so->so_snd); 351 352 ftxp.snd_nxt = tp->snd_nxt; 353 ftxp.rcv_nxt = tp->rcv_nxt; 354 ftxp.snd_space = bufsize; 355 ftxp.mss = tp->t_maxseg; 356 send_flowc_wr(toep, &ftxp); 357 358 soisconnected(so); 359 } 360 361 static int 362 send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits) 363 { 364 struct wrqe *wr; 365 struct cpl_rx_data_ack *req; 366 uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1); 367 368 KASSERT(credits >= 0, ("%s: %d credits", __func__, credits)); 369 370 wr = alloc_wrqe(sizeof(*req), toep->ctrlq); 371 if (wr == NULL) 372 return (0); 373 req = wrtod(wr); 374 375 INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid); 376 req->credit_dack = htobe32(dack | V_RX_CREDITS(credits)); 377 378 t4_wrq_tx(sc, wr); 379 return (credits); 380 } 381 382 void 383 t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp) 384 { 385 struct adapter *sc = tod->tod_softc; 386 struct inpcb *inp = tp->t_inpcb; 387 struct socket *so = inp->inp_socket; 388 struct sockbuf *sb = &so->so_rcv; 389 struct toepcb *toep = tp->t_toe; 390 int credits; 391 392 INP_WLOCK_ASSERT(inp); 393 394 SOCKBUF_LOCK_ASSERT(sb); 395 KASSERT(toep->sb_cc >= sbused(sb), 396 ("%s: sb %p has more data (%d) than last time (%d).", 397 __func__, sb, sbused(sb), toep->sb_cc)); 398 399 toep->rx_credits += toep->sb_cc - sbused(sb); 400 toep->sb_cc = sbused(sb); 401 402 if (toep->rx_credits > 0 && 403 (tp->rcv_wnd <= 32 * 1024 || toep->rx_credits >= 64 * 1024 || 404 (toep->rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) || 405 toep->sb_cc + tp->rcv_wnd < sb->sb_lowat)) { 406 407 credits = send_rx_credits(sc, toep, toep->rx_credits); 408 toep->rx_credits -= credits; 409 tp->rcv_wnd += credits; 410 tp->rcv_adv += credits; 411 } 412 } 413 414 void 415 t4_rcvd(struct toedev *tod, struct tcpcb *tp) 416 { 417 struct inpcb *inp = tp->t_inpcb; 418 struct socket *so = inp->inp_socket; 419 struct sockbuf *sb = 
&so->so_rcv; 420 421 SOCKBUF_LOCK(sb); 422 t4_rcvd_locked(tod, tp); 423 SOCKBUF_UNLOCK(sb); 424 } 425 426 /* 427 * Close a connection by sending a CPL_CLOSE_CON_REQ message. 428 */ 429 static int 430 close_conn(struct adapter *sc, struct toepcb *toep) 431 { 432 struct wrqe *wr; 433 struct cpl_close_con_req *req; 434 unsigned int tid = toep->tid; 435 436 CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid, 437 toep->flags & TPF_FIN_SENT ? ", IGNORED" : ""); 438 439 if (toep->flags & TPF_FIN_SENT) 440 return (0); 441 442 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 443 ("%s: flowc_wr not sent for tid %u.", __func__, tid)); 444 445 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 446 if (wr == NULL) { 447 /* XXX */ 448 panic("%s: allocation failure.", __func__); 449 } 450 req = wrtod(wr); 451 452 req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) | 453 V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr))); 454 req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) | 455 V_FW_WR_FLOWID(tid)); 456 req->wr.wr_lo = cpu_to_be64(0); 457 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); 458 req->rsvd = 0; 459 460 toep->flags |= TPF_FIN_SENT; 461 toep->flags &= ~TPF_SEND_FIN; 462 t4_l2t_send(sc, wr, toep->l2te); 463 464 return (0); 465 } 466 467 #define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16) 468 #define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16)) 469 470 /* Maximum amount of immediate data we could stuff in a WR */ 471 static inline int 472 max_imm_payload(int tx_credits) 473 { 474 const int n = 2; /* Use only up to 2 desc for imm. data WR */ 475 476 KASSERT(tx_credits >= 0 && 477 tx_credits <= MAX_OFLD_TX_CREDITS, 478 ("%s: %d credits", __func__, tx_credits)); 479 480 if (tx_credits < MIN_OFLD_TX_CREDITS) 481 return (0); 482 483 if (tx_credits >= (n * EQ_ESIZE) / 16) 484 return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr)); 485 else 486 return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr)); 487 } 488 489 /* Maximum number of SGL entries we could stuff in a WR */ 490 static inline int 491 max_dsgl_nsegs(int tx_credits) 492 { 493 int nseg = 1; /* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */ 494 int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS; 495 496 KASSERT(tx_credits >= 0 && 497 tx_credits <= MAX_OFLD_TX_CREDITS, 498 ("%s: %d credits", __func__, tx_credits)); 499 500 if (tx_credits < MIN_OFLD_TX_CREDITS) 501 return (0); 502 503 nseg += 2 * (sge_pair_credits * 16 / 24); 504 if ((sge_pair_credits * 16) % 24 == 16) 505 nseg++; 506 507 return (nseg); 508 } 509 510 static inline void 511 write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen, 512 unsigned int plen, uint8_t credits, int shove, int ulp_submode, int txalign) 513 { 514 struct fw_ofld_tx_data_wr *txwr = dst; 515 516 txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) | 517 V_FW_WR_IMMDLEN(immdlen)); 518 txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) | 519 V_FW_WR_LEN16(credits)); 520 txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(toep->ulp_mode) | 521 V_TX_ULP_SUBMODE(ulp_submode) | V_TX_URG(0) | V_TX_SHOVE(shove)); 522 txwr->plen = htobe32(plen); 523 524 if (txalign > 0) { 525 struct tcpcb *tp = intotcpcb(toep->inp); 526 527 if (plen < 2 * tp->t_maxseg || is_10G_port(toep->vi->pi)) 528 txwr->lsodisable_to_flags |= 529 htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE); 530 else 531 txwr->lsodisable_to_flags |= 532 htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD | 533 (tp->t_flags & TF_NODELAY ? 
0 : 534 F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE)); 535 } 536 } 537 538 /* 539 * Generate a DSGL from a starting mbuf. The total number of segments and the 540 * maximum segments in any one mbuf are provided. 541 */ 542 static void 543 write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n) 544 { 545 struct mbuf *m; 546 struct ulptx_sgl *usgl = dst; 547 int i, j, rc; 548 struct sglist sg; 549 struct sglist_seg segs[n]; 550 551 KASSERT(nsegs > 0, ("%s: nsegs 0", __func__)); 552 553 sglist_init(&sg, n, segs); 554 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 555 V_ULPTX_NSGE(nsegs)); 556 557 i = -1; 558 for (m = start; m != stop; m = m->m_next) { 559 if (IS_AIOTX_MBUF(m)) 560 rc = sglist_append_vmpages(&sg, aiotx_mbuf_pages(m), 561 aiotx_mbuf_pgoff(m), m->m_len); 562 else 563 rc = sglist_append(&sg, mtod(m, void *), m->m_len); 564 if (__predict_false(rc != 0)) 565 panic("%s: sglist_append %d", __func__, rc); 566 567 for (j = 0; j < sg.sg_nseg; i++, j++) { 568 if (i < 0) { 569 usgl->len0 = htobe32(segs[j].ss_len); 570 usgl->addr0 = htobe64(segs[j].ss_paddr); 571 } else { 572 usgl->sge[i / 2].len[i & 1] = 573 htobe32(segs[j].ss_len); 574 usgl->sge[i / 2].addr[i & 1] = 575 htobe64(segs[j].ss_paddr); 576 } 577 #ifdef INVARIANTS 578 nsegs--; 579 #endif 580 } 581 sglist_reset(&sg); 582 } 583 if (i & 1) 584 usgl->sge[i / 2].len[1] = htobe32(0); 585 KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p", 586 __func__, nsegs, start, stop)); 587 } 588 589 /* 590 * Max number of SGL entries an offload tx work request can have. This is 41 591 * (1 + 40) for a full 512B work request. 592 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40) 593 */ 594 #define OFLD_SGL_LEN (41) 595 596 /* 597 * Send data and/or a FIN to the peer. 598 * 599 * The socket's so_snd buffer consists of a stream of data starting with sb_mb 600 * and linked together with m_next. sb_sndptr, if set, is the last mbuf that 601 * was transmitted. 602 * 603 * drop indicates the number of bytes that should be dropped from the head of 604 * the send buffer. It is an optimization that lets do_fw4_ack avoid creating 605 * contention on the send buffer lock (before this change it used to do 606 * sowwakeup and then t4_push_frames right after that when recovering from tx 607 * stalls). When drop is set this function MUST drop the bytes and wake up any 608 * writers. 
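 *
 * Each iteration of the loop below builds one fw_ofld_tx_data_wr: if the
 * payload fits in max_imm_payload(tx_credits) bytes and no zero-copy AIO
 * mbufs are involved it is copied into the work request as immediate data,
 * otherwise a DSGL of at most max_dsgl_nsegs(tx_credits) entries is written
 * after the header.  Taking the 16-byte fw_ofld_tx_data_wr from the comment
 * above (so MIN_OFLD_TX_CREDITS is 2), a full 512-byte work request has 32
 * sixteen-byte credits and max_dsgl_nsegs() works out to
 * 1 + 2 * ((32 - 2) * 16 / 24) = 41, which is where OFLD_SGL_LEN comes from.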
 */
void
t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m, *sb_sndptr;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_snd;
	int tx_credits, shove, compl, sowwakeup;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
	bool aiotx_mbuf_seen;

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
	    toep->ulp_mode == ULP_MODE_TCPDDP ||
	    toep->ulp_mode == ULP_MODE_RDMA,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
	    __func__, toep->tid, toep->flags, tp->t_flags, drop);
#endif
	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	do {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		SOCKBUF_LOCK(sb);
		sowwakeup = drop;
		if (drop) {
			sbdrop_locked(sb, drop);
			drop = 0;
		}
		sb_sndptr = sb->sb_sndptr;
		sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb;
		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		aiotx_mbuf_seen = false;
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n;

			if (IS_AIOTX_MBUF(m))
				n = sglist_count_vmpages(aiotx_mbuf_pages(m),
				    aiotx_mbuf_pgoff(m), m->m_len);
			else
				n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/* This mbuf sent us _over_ the nsegs limit, back out */
			if (plen > max_imm && nsegs > max_nsegs) {
				nsegs -= n;
				plen -= m->m_len;
				if (plen == 0) {
					/* Too few credits */
					toep->flags |= TPF_TX_SUSPENDED;
					if (sowwakeup) {
						if (!TAILQ_EMPTY(
						    &toep->aiotx_jobq))
							t4_aiotx_queue_toep(
							    toep);
						sowwakeup_locked(so);
					} else
						SOCKBUF_UNLOCK(sb);
					SOCKBUF_UNLOCK_ASSERT(sb);
					return;
				}
				break;
			}

			if (IS_AIOTX_MBUF(m))
				aiotx_mbuf_seen = true;
			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
			sb_sndptr = m;	/* new sb->sb_sndptr if all goes well */

			/* This mbuf put us right at the max_nsegs limit */
			if (plen > max_imm && nsegs == max_nsegs) {
				m = m->m_next;
				break;
			}
		}

		if (sbused(sb) > sb->sb_hiwat * 5 / 8 &&
		    toep->plen_nocompl + plen >= sb->sb_hiwat / 4)
			compl = 1;
		else
			compl = 0;

		if (sb->sb_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf &&
		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
		    sbused(sb) >= sb->sb_hiwat * 7 / 8) {
			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max);

			if (!sbreserve_locked(sb, newsize, so, NULL))
				sb->sb_flags &= ~SB_AUTOSIZE;
			else
				sowwakeup = 1;	/* room available */
		}
		if (sowwakeup) {
			if (!TAILQ_EMPTY(&toep->aiotx_jobq))
t4_aiotx_queue_toep(toep); 735 sowwakeup_locked(so); 736 } else 737 SOCKBUF_UNLOCK(sb); 738 SOCKBUF_UNLOCK_ASSERT(sb); 739 740 /* nothing to send */ 741 if (plen == 0) { 742 KASSERT(m == NULL, 743 ("%s: nothing to send, but m != NULL", __func__)); 744 break; 745 } 746 747 if (__predict_false(toep->flags & TPF_FIN_SENT)) 748 panic("%s: excess tx.", __func__); 749 750 shove = m == NULL && !(tp->t_flags & TF_MORETOCOME); 751 if (plen <= max_imm && !aiotx_mbuf_seen) { 752 753 /* Immediate data tx */ 754 755 wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), 756 toep->ofld_txq); 757 if (wr == NULL) { 758 /* XXX: how will we recover from this? */ 759 toep->flags |= TPF_TX_SUSPENDED; 760 return; 761 } 762 txwr = wrtod(wr); 763 credits = howmany(wr->wr_len, 16); 764 write_tx_wr(txwr, toep, plen, plen, credits, shove, 0, 765 sc->tt.tx_align); 766 m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); 767 nsegs = 0; 768 } else { 769 int wr_len; 770 771 /* DSGL tx */ 772 773 wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + 774 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 775 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq); 776 if (wr == NULL) { 777 /* XXX: how will we recover from this? */ 778 toep->flags |= TPF_TX_SUSPENDED; 779 return; 780 } 781 txwr = wrtod(wr); 782 credits = howmany(wr_len, 16); 783 write_tx_wr(txwr, toep, 0, plen, credits, shove, 0, 784 sc->tt.tx_align); 785 write_tx_sgl(txwr + 1, sndptr, m, nsegs, 786 max_nsegs_1mbuf); 787 if (wr_len & 0xf) { 788 uint64_t *pad = (uint64_t *) 789 ((uintptr_t)txwr + wr_len); 790 *pad = 0; 791 } 792 } 793 794 KASSERT(toep->tx_credits >= credits, 795 ("%s: not enough credits", __func__)); 796 797 toep->tx_credits -= credits; 798 toep->tx_nocompl += credits; 799 toep->plen_nocompl += plen; 800 if (toep->tx_credits <= toep->tx_total * 3 / 8 && 801 toep->tx_nocompl >= toep->tx_total / 4) 802 compl = 1; 803 804 if (compl || toep->ulp_mode == ULP_MODE_RDMA) { 805 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); 806 toep->tx_nocompl = 0; 807 toep->plen_nocompl = 0; 808 } 809 810 tp->snd_nxt += plen; 811 tp->snd_max += plen; 812 813 SOCKBUF_LOCK(sb); 814 KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__)); 815 sb->sb_sndptr = sb_sndptr; 816 SOCKBUF_UNLOCK(sb); 817 818 toep->flags |= TPF_TX_DATA_SENT; 819 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) 820 toep->flags |= TPF_TX_SUSPENDED; 821 822 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 823 txsd->plen = plen; 824 txsd->tx_credits = credits; 825 txsd++; 826 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 827 toep->txsd_pidx = 0; 828 txsd = &toep->txsd[0]; 829 } 830 toep->txsd_avail--; 831 832 t4_l2t_send(sc, wr, toep->l2te); 833 } while (m != NULL); 834 835 /* Send a FIN if requested, but only if there's no more data to send */ 836 if (m == NULL && toep->flags & TPF_SEND_FIN) 837 close_conn(sc, toep); 838 } 839 840 static inline void 841 rqdrop_locked(struct mbufq *q, int plen) 842 { 843 struct mbuf *m; 844 845 while (plen > 0) { 846 m = mbufq_dequeue(q); 847 848 /* Too many credits. */ 849 MPASS(m != NULL); 850 M_ASSERTPKTHDR(m); 851 852 /* Partial credits. 
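		 * A PDU is always sent in a single work request (see
		 * t4_push_pdus()), so returned credits cover whole mbuf
		 * chains.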
*/ 853 MPASS(plen >= m->m_pkthdr.len); 854 855 plen -= m->m_pkthdr.len; 856 m_freem(m); 857 } 858 } 859 860 void 861 t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop) 862 { 863 struct mbuf *sndptr, *m; 864 struct fw_ofld_tx_data_wr *txwr; 865 struct wrqe *wr; 866 u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf; 867 u_int adjusted_plen, ulp_submode; 868 struct inpcb *inp = toep->inp; 869 struct tcpcb *tp = intotcpcb(inp); 870 int tx_credits, shove; 871 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 872 struct mbufq *pduq = &toep->ulp_pduq; 873 static const u_int ulp_extra_len[] = {0, 4, 4, 8}; 874 875 INP_WLOCK_ASSERT(inp); 876 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 877 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); 878 KASSERT(toep->ulp_mode == ULP_MODE_ISCSI, 879 ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep)); 880 881 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) 882 return; 883 884 /* 885 * This function doesn't resume by itself. Someone else must clear the 886 * flag and call this function. 887 */ 888 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { 889 KASSERT(drop == 0, 890 ("%s: drop (%d) != 0 but tx is suspended", __func__, drop)); 891 return; 892 } 893 894 if (drop) 895 rqdrop_locked(&toep->ulp_pdu_reclaimq, drop); 896 897 while ((sndptr = mbufq_first(pduq)) != NULL) { 898 M_ASSERTPKTHDR(sndptr); 899 900 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); 901 max_imm = max_imm_payload(tx_credits); 902 max_nsegs = max_dsgl_nsegs(tx_credits); 903 904 plen = 0; 905 nsegs = 0; 906 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ 907 for (m = sndptr; m != NULL; m = m->m_next) { 908 int n = sglist_count(mtod(m, void *), m->m_len); 909 910 nsegs += n; 911 plen += m->m_len; 912 913 /* 914 * This mbuf would send us _over_ the nsegs limit. 915 * Suspend tx because the PDU can't be sent out. 916 */ 917 if (plen > max_imm && nsegs > max_nsegs) { 918 toep->flags |= TPF_TX_SUSPENDED; 919 return; 920 } 921 922 if (max_nsegs_1mbuf < n) 923 max_nsegs_1mbuf = n; 924 } 925 926 if (__predict_false(toep->flags & TPF_FIN_SENT)) 927 panic("%s: excess tx.", __func__); 928 929 /* 930 * We have a PDU to send. All of it goes out in one WR so 'm' 931 * is NULL. A PDU's length is always a multiple of 4. 932 */ 933 MPASS(m == NULL); 934 MPASS((plen & 3) == 0); 935 MPASS(sndptr->m_pkthdr.len == plen); 936 937 shove = !(tp->t_flags & TF_MORETOCOME); 938 ulp_submode = mbuf_ulp_submode(sndptr); 939 MPASS(ulp_submode < nitems(ulp_extra_len)); 940 941 /* 942 * plen doesn't include header and data digests, which are 943 * generated and inserted in the right places by the TOE, but 944 * they do occupy TCP sequence space and need to be accounted 945 * for. 946 */ 947 adjusted_plen = plen + ulp_extra_len[ulp_submode]; 948 if (plen <= max_imm) { 949 950 /* Immediate data tx */ 951 952 wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), 953 toep->ofld_txq); 954 if (wr == NULL) { 955 /* XXX: how will we recover from this? 
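				 * For now the tid is just marked suspended;
				 * tx is retried from do_fw4_ack() once credits
				 * for earlier work requests are returned and
				 * TPF_TX_SUSPENDED is cleared.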
*/ 956 toep->flags |= TPF_TX_SUSPENDED; 957 return; 958 } 959 txwr = wrtod(wr); 960 credits = howmany(wr->wr_len, 16); 961 write_tx_wr(txwr, toep, plen, adjusted_plen, credits, 962 shove, ulp_submode, sc->tt.tx_align); 963 m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); 964 nsegs = 0; 965 } else { 966 int wr_len; 967 968 /* DSGL tx */ 969 wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + 970 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 971 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq); 972 if (wr == NULL) { 973 /* XXX: how will we recover from this? */ 974 toep->flags |= TPF_TX_SUSPENDED; 975 return; 976 } 977 txwr = wrtod(wr); 978 credits = howmany(wr_len, 16); 979 write_tx_wr(txwr, toep, 0, adjusted_plen, credits, 980 shove, ulp_submode, sc->tt.tx_align); 981 write_tx_sgl(txwr + 1, sndptr, m, nsegs, 982 max_nsegs_1mbuf); 983 if (wr_len & 0xf) { 984 uint64_t *pad = (uint64_t *) 985 ((uintptr_t)txwr + wr_len); 986 *pad = 0; 987 } 988 } 989 990 KASSERT(toep->tx_credits >= credits, 991 ("%s: not enough credits", __func__)); 992 993 m = mbufq_dequeue(pduq); 994 MPASS(m == sndptr); 995 mbufq_enqueue(&toep->ulp_pdu_reclaimq, m); 996 997 toep->tx_credits -= credits; 998 toep->tx_nocompl += credits; 999 toep->plen_nocompl += plen; 1000 if (toep->tx_credits <= toep->tx_total * 3 / 8 && 1001 toep->tx_nocompl >= toep->tx_total / 4) { 1002 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); 1003 toep->tx_nocompl = 0; 1004 toep->plen_nocompl = 0; 1005 } 1006 1007 tp->snd_nxt += adjusted_plen; 1008 tp->snd_max += adjusted_plen; 1009 1010 toep->flags |= TPF_TX_DATA_SENT; 1011 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) 1012 toep->flags |= TPF_TX_SUSPENDED; 1013 1014 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 1015 txsd->plen = plen; 1016 txsd->tx_credits = credits; 1017 txsd++; 1018 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 1019 toep->txsd_pidx = 0; 1020 txsd = &toep->txsd[0]; 1021 } 1022 toep->txsd_avail--; 1023 1024 t4_l2t_send(sc, wr, toep->l2te); 1025 } 1026 1027 /* Send a FIN if requested, but only if there are no more PDUs to send */ 1028 if (mbufq_first(pduq) == NULL && toep->flags & TPF_SEND_FIN) 1029 close_conn(sc, toep); 1030 } 1031 1032 int 1033 t4_tod_output(struct toedev *tod, struct tcpcb *tp) 1034 { 1035 struct adapter *sc = tod->tod_softc; 1036 #ifdef INVARIANTS 1037 struct inpcb *inp = tp->t_inpcb; 1038 #endif 1039 struct toepcb *toep = tp->t_toe; 1040 1041 INP_WLOCK_ASSERT(inp); 1042 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1043 ("%s: inp %p dropped.", __func__, inp)); 1044 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1045 1046 if (toep->ulp_mode == ULP_MODE_ISCSI) 1047 t4_push_pdus(sc, toep, 0); 1048 else 1049 t4_push_frames(sc, toep, 0); 1050 1051 return (0); 1052 } 1053 1054 int 1055 t4_send_fin(struct toedev *tod, struct tcpcb *tp) 1056 { 1057 struct adapter *sc = tod->tod_softc; 1058 #ifdef INVARIANTS 1059 struct inpcb *inp = tp->t_inpcb; 1060 #endif 1061 struct toepcb *toep = tp->t_toe; 1062 1063 INP_WLOCK_ASSERT(inp); 1064 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1065 ("%s: inp %p dropped.", __func__, inp)); 1066 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1067 1068 toep->flags |= TPF_SEND_FIN; 1069 if (tp->t_state >= TCPS_ESTABLISHED) { 1070 if (toep->ulp_mode == ULP_MODE_ISCSI) 1071 t4_push_pdus(sc, toep, 0); 1072 else 1073 t4_push_frames(sc, toep, 0); 1074 } 1075 1076 return (0); 1077 } 1078 1079 int 1080 t4_send_rst(struct toedev *tod, struct tcpcb *tp) 1081 { 1082 struct adapter *sc = tod->tod_softc; 
1083 #if defined(INVARIANTS) 1084 struct inpcb *inp = tp->t_inpcb; 1085 #endif 1086 struct toepcb *toep = tp->t_toe; 1087 1088 INP_WLOCK_ASSERT(inp); 1089 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1090 ("%s: inp %p dropped.", __func__, inp)); 1091 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1092 1093 /* hmmmm */ 1094 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 1095 ("%s: flowc for tid %u [%s] not sent already", 1096 __func__, toep->tid, tcpstates[tp->t_state])); 1097 1098 send_reset(sc, toep, 0); 1099 return (0); 1100 } 1101 1102 /* 1103 * Peer has sent us a FIN. 1104 */ 1105 static int 1106 do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1107 { 1108 struct adapter *sc = iq->adapter; 1109 const struct cpl_peer_close *cpl = (const void *)(rss + 1); 1110 unsigned int tid = GET_TID(cpl); 1111 struct toepcb *toep = lookup_tid(sc, tid); 1112 struct inpcb *inp = toep->inp; 1113 struct tcpcb *tp = NULL; 1114 struct socket *so; 1115 #ifdef INVARIANTS 1116 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1117 #endif 1118 1119 KASSERT(opcode == CPL_PEER_CLOSE, 1120 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1121 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1122 1123 if (__predict_false(toep->flags & TPF_SYNQE)) { 1124 #ifdef INVARIANTS 1125 struct synq_entry *synqe = (void *)toep; 1126 1127 INP_WLOCK(synqe->lctx->inp); 1128 if (synqe->flags & TPF_SYNQE_HAS_L2TE) { 1129 KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, 1130 ("%s: listen socket closed but tid %u not aborted.", 1131 __func__, tid)); 1132 } else { 1133 /* 1134 * do_pass_accept_req is still running and will 1135 * eventually take care of this tid. 1136 */ 1137 } 1138 INP_WUNLOCK(synqe->lctx->inp); 1139 #endif 1140 CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, 1141 toep, toep->flags); 1142 return (0); 1143 } 1144 1145 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1146 1147 CURVNET_SET(toep->vnet); 1148 INP_INFO_RLOCK(&V_tcbinfo); 1149 INP_WLOCK(inp); 1150 tp = intotcpcb(inp); 1151 1152 CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__, 1153 tid, tp ? 
tcpstates[tp->t_state] : "no tp", toep->flags, inp); 1154 1155 if (toep->flags & TPF_ABORT_SHUTDOWN) 1156 goto done; 1157 1158 tp->rcv_nxt++; /* FIN */ 1159 1160 so = inp->inp_socket; 1161 if (toep->ulp_mode == ULP_MODE_TCPDDP) { 1162 DDP_LOCK(toep); 1163 if (__predict_false(toep->ddp_flags & 1164 (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) 1165 handle_ddp_close(toep, tp, cpl->rcv_nxt); 1166 DDP_UNLOCK(toep); 1167 } 1168 socantrcvmore(so); 1169 1170 if (toep->ulp_mode != ULP_MODE_RDMA) { 1171 KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt), 1172 ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt, 1173 be32toh(cpl->rcv_nxt))); 1174 } 1175 1176 switch (tp->t_state) { 1177 case TCPS_SYN_RECEIVED: 1178 tp->t_starttime = ticks; 1179 /* FALLTHROUGH */ 1180 1181 case TCPS_ESTABLISHED: 1182 tp->t_state = TCPS_CLOSE_WAIT; 1183 break; 1184 1185 case TCPS_FIN_WAIT_1: 1186 tp->t_state = TCPS_CLOSING; 1187 break; 1188 1189 case TCPS_FIN_WAIT_2: 1190 tcp_twstart(tp); 1191 INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ 1192 INP_INFO_RUNLOCK(&V_tcbinfo); 1193 CURVNET_RESTORE(); 1194 1195 INP_WLOCK(inp); 1196 final_cpl_received(toep); 1197 return (0); 1198 1199 default: 1200 log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n", 1201 __func__, tid, tp->t_state); 1202 } 1203 done: 1204 INP_WUNLOCK(inp); 1205 INP_INFO_RUNLOCK(&V_tcbinfo); 1206 CURVNET_RESTORE(); 1207 return (0); 1208 } 1209 1210 /* 1211 * Peer has ACK'd our FIN. 1212 */ 1213 static int 1214 do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss, 1215 struct mbuf *m) 1216 { 1217 struct adapter *sc = iq->adapter; 1218 const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1); 1219 unsigned int tid = GET_TID(cpl); 1220 struct toepcb *toep = lookup_tid(sc, tid); 1221 struct inpcb *inp = toep->inp; 1222 struct tcpcb *tp = NULL; 1223 struct socket *so = NULL; 1224 #ifdef INVARIANTS 1225 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1226 #endif 1227 1228 KASSERT(opcode == CPL_CLOSE_CON_RPL, 1229 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1230 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1231 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1232 1233 CURVNET_SET(toep->vnet); 1234 INP_INFO_RLOCK(&V_tcbinfo); 1235 INP_WLOCK(inp); 1236 tp = intotcpcb(inp); 1237 1238 CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x", 1239 __func__, tid, tp ? 
tcpstates[tp->t_state] : "no tp", toep->flags); 1240 1241 if (toep->flags & TPF_ABORT_SHUTDOWN) 1242 goto done; 1243 1244 so = inp->inp_socket; 1245 tp->snd_una = be32toh(cpl->snd_nxt) - 1; /* exclude FIN */ 1246 1247 switch (tp->t_state) { 1248 case TCPS_CLOSING: /* see TCPS_FIN_WAIT_2 in do_peer_close too */ 1249 tcp_twstart(tp); 1250 release: 1251 INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ 1252 INP_INFO_RUNLOCK(&V_tcbinfo); 1253 CURVNET_RESTORE(); 1254 1255 INP_WLOCK(inp); 1256 final_cpl_received(toep); /* no more CPLs expected */ 1257 1258 return (0); 1259 case TCPS_LAST_ACK: 1260 if (tcp_close(tp)) 1261 INP_WUNLOCK(inp); 1262 goto release; 1263 1264 case TCPS_FIN_WAIT_1: 1265 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 1266 soisdisconnected(so); 1267 tp->t_state = TCPS_FIN_WAIT_2; 1268 break; 1269 1270 default: 1271 log(LOG_ERR, 1272 "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n", 1273 __func__, tid, tcpstates[tp->t_state]); 1274 } 1275 done: 1276 INP_WUNLOCK(inp); 1277 INP_INFO_RUNLOCK(&V_tcbinfo); 1278 CURVNET_RESTORE(); 1279 return (0); 1280 } 1281 1282 void 1283 send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid, 1284 int rst_status) 1285 { 1286 struct wrqe *wr; 1287 struct cpl_abort_rpl *cpl; 1288 1289 wr = alloc_wrqe(sizeof(*cpl), ofld_txq); 1290 if (wr == NULL) { 1291 /* XXX */ 1292 panic("%s: allocation failure.", __func__); 1293 } 1294 cpl = wrtod(wr); 1295 1296 INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid); 1297 cpl->cmd = rst_status; 1298 1299 t4_wrq_tx(sc, wr); 1300 } 1301 1302 static int 1303 abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason) 1304 { 1305 switch (abort_reason) { 1306 case CPL_ERR_BAD_SYN: 1307 case CPL_ERR_CONN_RESET: 1308 return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET); 1309 case CPL_ERR_XMIT_TIMEDOUT: 1310 case CPL_ERR_PERSIST_TIMEDOUT: 1311 case CPL_ERR_FINWAIT2_TIMEDOUT: 1312 case CPL_ERR_KEEPALIVE_TIMEDOUT: 1313 return (ETIMEDOUT); 1314 default: 1315 return (EIO); 1316 } 1317 } 1318 1319 /* 1320 * TCP RST from the peer, timeout, or some other such critical error. 1321 */ 1322 static int 1323 do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1324 { 1325 struct adapter *sc = iq->adapter; 1326 const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1); 1327 unsigned int tid = GET_TID(cpl); 1328 struct toepcb *toep = lookup_tid(sc, tid); 1329 struct sge_wrq *ofld_txq = toep->ofld_txq; 1330 struct inpcb *inp; 1331 struct tcpcb *tp; 1332 #ifdef INVARIANTS 1333 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1334 #endif 1335 1336 KASSERT(opcode == CPL_ABORT_REQ_RSS, 1337 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1338 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1339 1340 if (toep->flags & TPF_SYNQE) 1341 return (do_abort_req_synqe(iq, rss, m)); 1342 1343 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1344 1345 if (negative_advice(cpl->status)) { 1346 CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)", 1347 __func__, cpl->status, tid, toep->flags); 1348 return (0); /* Ignore negative advice */ 1349 } 1350 1351 inp = toep->inp; 1352 CURVNET_SET(toep->vnet); 1353 INP_INFO_RLOCK(&V_tcbinfo); /* for tcp_close */ 1354 INP_WLOCK(inp); 1355 1356 tp = intotcpcb(inp); 1357 1358 CTR6(KTR_CXGBE, 1359 "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d", 1360 __func__, tid, tp ? 
tcpstates[tp->t_state] : "no tp", toep->flags, 1361 inp->inp_flags, cpl->status); 1362 1363 /* 1364 * If we'd initiated an abort earlier the reply to it is responsible for 1365 * cleaning up resources. Otherwise we tear everything down right here 1366 * right now. We owe the T4 a CPL_ABORT_RPL no matter what. 1367 */ 1368 if (toep->flags & TPF_ABORT_SHUTDOWN) { 1369 INP_WUNLOCK(inp); 1370 goto done; 1371 } 1372 toep->flags |= TPF_ABORT_SHUTDOWN; 1373 1374 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 1375 struct socket *so = inp->inp_socket; 1376 1377 if (so != NULL) 1378 so_error_set(so, abort_status_to_errno(tp, 1379 cpl->status)); 1380 tp = tcp_close(tp); 1381 if (tp == NULL) 1382 INP_WLOCK(inp); /* re-acquire */ 1383 } 1384 1385 final_cpl_received(toep); 1386 done: 1387 INP_INFO_RUNLOCK(&V_tcbinfo); 1388 CURVNET_RESTORE(); 1389 send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST); 1390 return (0); 1391 } 1392 1393 /* 1394 * Reply to the CPL_ABORT_REQ (send_reset) 1395 */ 1396 static int 1397 do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1398 { 1399 struct adapter *sc = iq->adapter; 1400 const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1); 1401 unsigned int tid = GET_TID(cpl); 1402 struct toepcb *toep = lookup_tid(sc, tid); 1403 struct inpcb *inp = toep->inp; 1404 #ifdef INVARIANTS 1405 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1406 #endif 1407 1408 KASSERT(opcode == CPL_ABORT_RPL_RSS, 1409 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1410 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1411 1412 if (toep->flags & TPF_SYNQE) 1413 return (do_abort_rpl_synqe(iq, rss, m)); 1414 1415 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1416 1417 CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d", 1418 __func__, tid, toep, inp, cpl->status); 1419 1420 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, 1421 ("%s: wasn't expecting abort reply", __func__)); 1422 1423 INP_WLOCK(inp); 1424 final_cpl_received(toep); 1425 1426 return (0); 1427 } 1428 1429 static int 1430 do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1431 { 1432 struct adapter *sc = iq->adapter; 1433 const struct cpl_rx_data *cpl = mtod(m, const void *); 1434 unsigned int tid = GET_TID(cpl); 1435 struct toepcb *toep = lookup_tid(sc, tid); 1436 struct inpcb *inp = toep->inp; 1437 struct tcpcb *tp; 1438 struct socket *so; 1439 struct sockbuf *sb; 1440 int len; 1441 uint32_t ddp_placed = 0; 1442 1443 if (__predict_false(toep->flags & TPF_SYNQE)) { 1444 #ifdef INVARIANTS 1445 struct synq_entry *synqe = (void *)toep; 1446 1447 INP_WLOCK(synqe->lctx->inp); 1448 if (synqe->flags & TPF_SYNQE_HAS_L2TE) { 1449 KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, 1450 ("%s: listen socket closed but tid %u not aborted.", 1451 __func__, tid)); 1452 } else { 1453 /* 1454 * do_pass_accept_req is still running and will 1455 * eventually take care of this tid. 
1456 */ 1457 } 1458 INP_WUNLOCK(synqe->lctx->inp); 1459 #endif 1460 CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, 1461 toep, toep->flags); 1462 m_freem(m); 1463 return (0); 1464 } 1465 1466 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1467 1468 /* strip off CPL header */ 1469 m_adj(m, sizeof(*cpl)); 1470 len = m->m_pkthdr.len; 1471 1472 INP_WLOCK(inp); 1473 if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) { 1474 CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x", 1475 __func__, tid, len, inp->inp_flags); 1476 INP_WUNLOCK(inp); 1477 m_freem(m); 1478 return (0); 1479 } 1480 1481 tp = intotcpcb(inp); 1482 1483 if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq))) 1484 ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt; 1485 1486 tp->rcv_nxt += len; 1487 if (tp->rcv_wnd < len) { 1488 KASSERT(toep->ulp_mode == ULP_MODE_RDMA, 1489 ("%s: negative window size", __func__)); 1490 } 1491 1492 tp->rcv_wnd -= len; 1493 tp->t_rcvtime = ticks; 1494 1495 if (toep->ulp_mode == ULP_MODE_TCPDDP) 1496 DDP_LOCK(toep); 1497 so = inp_inpcbtosocket(inp); 1498 sb = &so->so_rcv; 1499 SOCKBUF_LOCK(sb); 1500 1501 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) { 1502 CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)", 1503 __func__, tid, len); 1504 m_freem(m); 1505 SOCKBUF_UNLOCK(sb); 1506 if (toep->ulp_mode == ULP_MODE_TCPDDP) 1507 DDP_UNLOCK(toep); 1508 INP_WUNLOCK(inp); 1509 1510 CURVNET_SET(toep->vnet); 1511 INP_INFO_RLOCK(&V_tcbinfo); 1512 INP_WLOCK(inp); 1513 tp = tcp_drop(tp, ECONNRESET); 1514 if (tp) 1515 INP_WUNLOCK(inp); 1516 INP_INFO_RUNLOCK(&V_tcbinfo); 1517 CURVNET_RESTORE(); 1518 1519 return (0); 1520 } 1521 1522 /* receive buffer autosize */ 1523 MPASS(toep->vnet == so->so_vnet); 1524 CURVNET_SET(toep->vnet); 1525 if (sb->sb_flags & SB_AUTOSIZE && 1526 V_tcp_do_autorcvbuf && 1527 sb->sb_hiwat < V_tcp_autorcvbuf_max && 1528 len > (sbspace(sb) / 8 * 7)) { 1529 unsigned int hiwat = sb->sb_hiwat; 1530 unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc, 1531 V_tcp_autorcvbuf_max); 1532 1533 if (!sbreserve_locked(sb, newsize, so, NULL)) 1534 sb->sb_flags &= ~SB_AUTOSIZE; 1535 else 1536 toep->rx_credits += newsize - hiwat; 1537 } 1538 1539 if (toep->ddp_waiting_count != 0 || toep->ddp_active_count != 0) 1540 CTR3(KTR_CXGBE, "%s: tid %u, non-ddp rx (%d bytes)", __func__, 1541 tid, len); 1542 1543 if (toep->ulp_mode == ULP_MODE_TCPDDP) { 1544 int changed = !(toep->ddp_flags & DDP_ON) ^ cpl->ddp_off; 1545 1546 if (changed) { 1547 if (toep->ddp_flags & DDP_SC_REQ) 1548 toep->ddp_flags ^= DDP_ON | DDP_SC_REQ; 1549 else { 1550 KASSERT(cpl->ddp_off == 1, 1551 ("%s: DDP switched on by itself.", 1552 __func__)); 1553 1554 /* Fell out of DDP mode */ 1555 toep->ddp_flags &= ~DDP_ON; 1556 CTR1(KTR_CXGBE, "%s: fell out of DDP mode", 1557 __func__); 1558 1559 insert_ddp_data(toep, ddp_placed); 1560 } 1561 } 1562 1563 if (toep->ddp_flags & DDP_ON) { 1564 /* 1565 * CPL_RX_DATA with DDP on can only be an indicate. 1566 * Start posting queued AIO requests via DDP. The 1567 * payload that arrived in this indicate is appended 1568 * to the socket buffer as usual. 
1569 */ 1570 handle_ddp_indicate(toep); 1571 } 1572 } 1573 1574 KASSERT(toep->sb_cc >= sbused(sb), 1575 ("%s: sb %p has more data (%d) than last time (%d).", 1576 __func__, sb, sbused(sb), toep->sb_cc)); 1577 toep->rx_credits += toep->sb_cc - sbused(sb); 1578 sbappendstream_locked(sb, m, 0); 1579 toep->sb_cc = sbused(sb); 1580 if (toep->rx_credits > 0 && toep->sb_cc + tp->rcv_wnd < sb->sb_lowat) { 1581 int credits; 1582 1583 credits = send_rx_credits(sc, toep, toep->rx_credits); 1584 toep->rx_credits -= credits; 1585 tp->rcv_wnd += credits; 1586 tp->rcv_adv += credits; 1587 } 1588 1589 if (toep->ddp_waiting_count > 0 && sbavail(sb) != 0) { 1590 CTR2(KTR_CXGBE, "%s: tid %u queueing AIO task", __func__, 1591 tid); 1592 ddp_queue_toep(toep); 1593 } 1594 sorwakeup_locked(so); 1595 SOCKBUF_UNLOCK_ASSERT(sb); 1596 if (toep->ulp_mode == ULP_MODE_TCPDDP) 1597 DDP_UNLOCK(toep); 1598 1599 INP_WUNLOCK(inp); 1600 CURVNET_RESTORE(); 1601 return (0); 1602 } 1603 1604 #define S_CPL_FW4_ACK_OPCODE 24 1605 #define M_CPL_FW4_ACK_OPCODE 0xff 1606 #define V_CPL_FW4_ACK_OPCODE(x) ((x) << S_CPL_FW4_ACK_OPCODE) 1607 #define G_CPL_FW4_ACK_OPCODE(x) \ 1608 (((x) >> S_CPL_FW4_ACK_OPCODE) & M_CPL_FW4_ACK_OPCODE) 1609 1610 #define S_CPL_FW4_ACK_FLOWID 0 1611 #define M_CPL_FW4_ACK_FLOWID 0xffffff 1612 #define V_CPL_FW4_ACK_FLOWID(x) ((x) << S_CPL_FW4_ACK_FLOWID) 1613 #define G_CPL_FW4_ACK_FLOWID(x) \ 1614 (((x) >> S_CPL_FW4_ACK_FLOWID) & M_CPL_FW4_ACK_FLOWID) 1615 1616 #define S_CPL_FW4_ACK_CR 24 1617 #define M_CPL_FW4_ACK_CR 0xff 1618 #define V_CPL_FW4_ACK_CR(x) ((x) << S_CPL_FW4_ACK_CR) 1619 #define G_CPL_FW4_ACK_CR(x) (((x) >> S_CPL_FW4_ACK_CR) & M_CPL_FW4_ACK_CR) 1620 1621 #define S_CPL_FW4_ACK_SEQVAL 0 1622 #define M_CPL_FW4_ACK_SEQVAL 0x1 1623 #define V_CPL_FW4_ACK_SEQVAL(x) ((x) << S_CPL_FW4_ACK_SEQVAL) 1624 #define G_CPL_FW4_ACK_SEQVAL(x) \ 1625 (((x) >> S_CPL_FW4_ACK_SEQVAL) & M_CPL_FW4_ACK_SEQVAL) 1626 #define F_CPL_FW4_ACK_SEQVAL V_CPL_FW4_ACK_SEQVAL(1U) 1627 1628 static int 1629 do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1630 { 1631 struct adapter *sc = iq->adapter; 1632 const struct cpl_fw4_ack *cpl = (const void *)(rss + 1); 1633 unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl))); 1634 struct toepcb *toep = lookup_tid(sc, tid); 1635 struct inpcb *inp; 1636 struct tcpcb *tp; 1637 struct socket *so; 1638 uint8_t credits = cpl->credits; 1639 struct ofld_tx_sdesc *txsd; 1640 int plen; 1641 #ifdef INVARIANTS 1642 unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl))); 1643 #endif 1644 1645 /* 1646 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and 1647 * now this comes back carrying the credits for the flowc. 
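	 * A synq entry carries no offload tx state to update, so the credits
	 * are simply ignored and the handler returns early.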
	 */
	if (__predict_false(toep->flags & TPF_SYNQE)) {
		KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
		    ("%s: credits for a synq entry %p", __func__, toep));
		return (0);
	}

	inp = toep->inp;

	KASSERT(opcode == CPL_FW4_ACK,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_WLOCK(inp);

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) {
		INP_WUNLOCK(inp);
		return (0);
	}

	KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0,
	    ("%s: inp_flags 0x%x", __func__, inp->inp_flags));

	tp = intotcpcb(inp);

	if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) {
		tcp_seq snd_una = be32toh(cpl->snd_una);

#ifdef INVARIANTS
		if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) {
			log(LOG_ERR,
			    "%s: unexpected seq# %x for TID %u, snd_una %x\n",
			    __func__, snd_una, toep->tid, tp->snd_una);
		}
#endif

		if (tp->snd_una != snd_una) {
			tp->snd_una = snd_una;
			tp->ts_recent_age = tcp_ts_getticks();
		}
	}

#ifdef VERBOSE_TRACES
	CTR3(KTR_CXGBE, "%s: tid %d credits %u", __func__, tid, credits);
#endif
	so = inp->inp_socket;
	txsd = &toep->txsd[toep->txsd_cidx];
	plen = 0;
	while (credits) {
		KASSERT(credits >= txsd->tx_credits,
		    ("%s: too many (or partial) credits", __func__));
		credits -= txsd->tx_credits;
		toep->tx_credits += txsd->tx_credits;
		plen += txsd->plen;
		txsd++;
		toep->txsd_avail++;
		KASSERT(toep->txsd_avail <= toep->txsd_total,
		    ("%s: txsd avail > total", __func__));
		if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) {
			txsd = &toep->txsd[0];
			toep->txsd_cidx = 0;
		}
	}

	if (toep->tx_credits == toep->tx_total) {
		toep->tx_nocompl = 0;
		toep->plen_nocompl = 0;
	}

	if (toep->flags & TPF_TX_SUSPENDED &&
	    toep->tx_credits >= toep->tx_total / 4) {
#ifdef VERBOSE_TRACES
		CTR2(KTR_CXGBE, "%s: tid %d calling t4_push_frames", __func__,
		    tid);
#endif
		toep->flags &= ~TPF_TX_SUSPENDED;
		CURVNET_SET(toep->vnet);
		if (toep->ulp_mode == ULP_MODE_ISCSI)
			t4_push_pdus(sc, toep, plen);
		else
			t4_push_frames(sc, toep, plen);
		CURVNET_RESTORE();
	} else if (plen > 0) {
		struct sockbuf *sb = &so->so_snd;
		int sbu;

		SOCKBUF_LOCK(sb);
		sbu = sbused(sb);
		if (toep->ulp_mode == ULP_MODE_ISCSI) {

			if (__predict_false(sbu > 0)) {
				/*
				 * The data transmitted before the tid's ULP
				 * mode changed to ISCSI is still in so_snd.
				 * Incoming credits should account for so_snd
				 * first.
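				 * Whatever is left after so_snd has been
				 * drained is then charged to the
				 * ulp_pdu_reclaimq via rqdrop_locked() below.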
1745 */ 1746 sbdrop_locked(sb, min(sbu, plen)); 1747 plen -= min(sbu, plen); 1748 } 1749 sowwakeup_locked(so); /* unlocks so_snd */ 1750 rqdrop_locked(&toep->ulp_pdu_reclaimq, plen); 1751 } else { 1752 #ifdef VERBOSE_TRACES 1753 CTR3(KTR_CXGBE, "%s: tid %d dropped %d bytes", __func__, 1754 tid, plen); 1755 #endif 1756 sbdrop_locked(sb, plen); 1757 if (!TAILQ_EMPTY(&toep->aiotx_jobq)) 1758 t4_aiotx_queue_toep(toep); 1759 sowwakeup_locked(so); /* unlocks so_snd */ 1760 } 1761 SOCKBUF_UNLOCK_ASSERT(sb); 1762 } 1763 1764 INP_WUNLOCK(inp); 1765 1766 return (0); 1767 } 1768 1769 int 1770 do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1771 { 1772 struct adapter *sc = iq->adapter; 1773 const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1); 1774 unsigned int tid = GET_TID(cpl); 1775 struct toepcb *toep; 1776 #ifdef INVARIANTS 1777 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1778 #endif 1779 1780 KASSERT(opcode == CPL_SET_TCB_RPL, 1781 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1782 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1783 MPASS(iq != &sc->sge.fwq); 1784 1785 toep = lookup_tid(sc, tid); 1786 if (toep->ulp_mode == ULP_MODE_TCPDDP) { 1787 handle_ddp_tcb_rpl(toep, cpl); 1788 return (0); 1789 } 1790 1791 /* 1792 * TOM and/or other ULPs don't request replies for CPL_SET_TCB or 1793 * CPL_SET_TCB_FIELD requests. This can easily change and when it does 1794 * the dispatch code will go here. 1795 */ 1796 #ifdef INVARIANTS 1797 panic("%s: Unexpected CPL_SET_TCB_RPL for tid %u on iq %p", __func__, 1798 tid, iq); 1799 #else 1800 log(LOG_ERR, "%s: Unexpected CPL_SET_TCB_RPL for tid %u on iq %p\n", 1801 __func__, tid, iq); 1802 #endif 1803 1804 return (0); 1805 } 1806 1807 void 1808 t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, int tid, 1809 uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie, int iqid) 1810 { 1811 struct wrqe *wr; 1812 struct cpl_set_tcb_field *req; 1813 1814 MPASS((cookie & ~M_COOKIE) == 0); 1815 MPASS((iqid & ~M_QUEUENO) == 0); 1816 1817 wr = alloc_wrqe(sizeof(*req), wrq); 1818 if (wr == NULL) { 1819 /* XXX */ 1820 panic("%s: allocation failure.", __func__); 1821 } 1822 req = wrtod(wr); 1823 1824 INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid); 1825 req->reply_ctrl = htobe16(V_QUEUENO(iqid)); 1826 if (reply == 0) 1827 req->reply_ctrl |= htobe16(F_NO_REPLY); 1828 req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie)); 1829 req->mask = htobe64(mask); 1830 req->val = htobe64(val); 1831 1832 t4_wrq_tx(sc, wr); 1833 } 1834 1835 void 1836 t4_init_cpl_io_handlers(void) 1837 { 1838 1839 t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close); 1840 t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl); 1841 t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req); 1842 t4_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl); 1843 t4_register_cpl_handler(CPL_RX_DATA, do_rx_data); 1844 t4_register_cpl_handler(CPL_FW4_ACK, do_fw4_ack); 1845 } 1846 1847 void 1848 t4_uninit_cpl_io_handlers(void) 1849 { 1850 1851 t4_register_cpl_handler(CPL_PEER_CLOSE, NULL); 1852 t4_register_cpl_handler(CPL_CLOSE_CON_RPL, NULL); 1853 t4_register_cpl_handler(CPL_ABORT_REQ_RSS, NULL); 1854 t4_register_cpl_handler(CPL_ABORT_RPL_RSS, NULL); 1855 t4_register_cpl_handler(CPL_RX_DATA, NULL); 1856 t4_register_cpl_handler(CPL_FW4_ACK, NULL); 1857 } 1858 1859 /* 1860 * Use the 'backend3' field in AIO jobs to store the amount of data 1861 * sent by the AIO job so far and the 'backend4' field to hold an 
1862 * error that should be reported when the job is completed. 1863 */ 1864 #define aio_sent backend3 1865 #define aio_error backend4 1866 1867 #define jobtotid(job) \ 1868 (((struct toepcb *)(so_sototcpcb((job)->fd_file->f_data)->t_toe))->tid) 1869 1870 static void 1871 free_aiotx_buffer(struct aiotx_buffer *ab) 1872 { 1873 struct kaiocb *job; 1874 long status; 1875 int error; 1876 1877 if (refcount_release(&ab->refcount) == 0) 1878 return; 1879 1880 job = ab->job; 1881 error = job->aio_error; 1882 status = job->aio_sent; 1883 vm_page_unhold_pages(ab->ps.pages, ab->ps.npages); 1884 free(ab, M_CXGBE); 1885 #ifdef VERBOSE_TRACES 1886 CTR5(KTR_CXGBE, "%s: tid %d completed %p len %ld, error %d", __func__, 1887 jobtotid(job), job, status, error); 1888 #endif 1889 if (error == ECANCELED && status != 0) 1890 error = 0; 1891 if (error == ECANCELED) 1892 aio_cancel(job); 1893 else if (error) 1894 aio_complete(job, -1, error); 1895 else 1896 aio_complete(job, status, 0); 1897 } 1898 1899 static void 1900 t4_aiotx_mbuf_free(struct mbuf *m, void *buffer, void *arg) 1901 { 1902 struct aiotx_buffer *ab = buffer; 1903 1904 #ifdef VERBOSE_TRACES 1905 CTR3(KTR_CXGBE, "%s: completed %d bytes for tid %d", __func__, 1906 m->m_len, jobtotid(ab->job)); 1907 #endif 1908 free_aiotx_buffer(ab); 1909 } 1910 1911 /* 1912 * Hold the buffer backing an AIO request and return an AIO transmit 1913 * buffer. 1914 */ 1915 static int 1916 hold_aio(struct kaiocb *job) 1917 { 1918 struct aiotx_buffer *ab; 1919 struct vmspace *vm; 1920 vm_map_t map; 1921 vm_offset_t start, end, pgoff; 1922 int n; 1923 1924 MPASS(job->backend1 == NULL); 1925 1926 /* 1927 * The AIO subsystem will cancel and drain all requests before 1928 * permitting a process to exit or exec, so p_vmspace should 1929 * be stable here. 
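	 * The buffer's pages are held below with vm_fault_quick_hold_pages()
	 * so they stay resident for the zero-copy transmit; they are released
	 * again in free_aiotx_buffer() via vm_page_unhold_pages() once the
	 * last reference is dropped.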

/*
 * Hold the buffer backing an AIO request and return an AIO transmit
 * buffer.
 */
static int
hold_aio(struct kaiocb *job)
{
	struct aiotx_buffer *ab;
	struct vmspace *vm;
	vm_map_t map;
	vm_offset_t start, end, pgoff;
	int n;

	MPASS(job->backend1 == NULL);

	/*
	 * The AIO subsystem will cancel and drain all requests before
	 * permitting a process to exit or exec, so p_vmspace should
	 * be stable here.
	 */
	vm = job->userproc->p_vmspace;
	map = &vm->vm_map;
	start = (uintptr_t)job->uaiocb.aio_buf;
	pgoff = start & PAGE_MASK;
	end = round_page(start + job->uaiocb.aio_nbytes);
	start = trunc_page(start);
	n = atop(end - start);

	ab = malloc(sizeof(*ab) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK |
	    M_ZERO);
	refcount_init(&ab->refcount, 1);
	ab->ps.pages = (vm_page_t *)(ab + 1);
	ab->ps.npages = vm_fault_quick_hold_pages(map, start, end - start,
	    VM_PROT_WRITE, ab->ps.pages, n);
	if (ab->ps.npages < 0) {
		free(ab, M_CXGBE);
		return (EFAULT);
	}

	KASSERT(ab->ps.npages == n,
	    ("hold_aio: page count mismatch: %d vs %d", ab->ps.npages, n));

	ab->ps.offset = pgoff;
	ab->ps.len = job->uaiocb.aio_nbytes;
	ab->job = job;
	job->backend1 = ab;
#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d",
	    __func__, jobtotid(job), &ab->ps, job, ab->ps.npages);
#endif
	return (0);
}
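
/*
 * Try to transmit (part of) an AIO write job on a TOE socket.  The job's
 * wired pages are wrapped in external mbufs and appended directly to the
 * send socket buffer, so the payload is never copied.  If the socket
 * buffer has too little room, or the socket is blocking and the request
 * could not be completed in one pass, the job is placed back on the
 * aiotx_jobq to be retried when the socket becomes writable again.
 * Called, and returns, with the send buffer lock held.
 */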

static void
t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job)
{
	struct adapter *sc;
	struct sockbuf *sb;
	struct file *fp;
	struct aiotx_buffer *ab;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct mbuf *m;
	int error;
	bool moretocome, sendmore;

	sc = td_adapter(toep->td);
	sb = &so->so_snd;
	SOCKBUF_UNLOCK(sb);
	fp = job->fd_file;
	ab = job->backend1;
	m = NULL;

#ifdef MAC
	error = mac_socket_check_send(fp->f_cred, so);
	if (error != 0)
		goto out;
#endif

	if (ab == NULL) {
		error = hold_aio(job);
		if (error != 0)
			goto out;
		ab = job->backend1;
	}

	/* Inline sosend_generic(). */

	job->msgsnd = 1;

	error = sblock(sb, SBL_WAIT);
	MPASS(error == 0);

sendanother:
	m = m_get(M_WAITOK, MT_DATA);

	SOCKBUF_LOCK(sb);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		if ((so->so_options & SO_NOSIGPIPE) == 0) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		error = ENOTCONN;
		goto out;
	}
	if (sbspace(sb) < sb->sb_lowat) {
		MPASS(job->aio_sent == 0 || !(so->so_state & SS_NBIO));

		/*
		 * Don't block if there is too little room in the socket
		 * buffer.  Instead, requeue the request.
		 */
		if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
			SOCKBUF_UNLOCK(sb);
			sbunlock(sb);
			error = ECANCELED;
			goto out;
		}
		TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		goto out;
	}

	/*
	 * Write as much data as the socket permits, but no more than a
	 * single sndbuf at a time.
	 */
	m->m_len = sbspace(sb);
	if (m->m_len > ab->ps.len - job->aio_sent) {
		m->m_len = ab->ps.len - job->aio_sent;
		moretocome = false;
	} else
		moretocome = true;
	if (m->m_len > sc->tt.sndbuf) {
		m->m_len = sc->tt.sndbuf;
		sendmore = true;
	} else
		sendmore = false;

	if (!TAILQ_EMPTY(&toep->aiotx_jobq))
		moretocome = true;
	SOCKBUF_UNLOCK(sb);
	MPASS(m->m_len != 0);

	/* Inlined tcp_usr_send(). */

	inp = toep->inp;
	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		sbunlock(sb);
		error = ECONNRESET;
		goto out;
	}

	refcount_acquire(&ab->refcount);
	m_extadd(m, NULL, ab->ps.len, t4_aiotx_mbuf_free, ab,
	    (void *)(uintptr_t)job->aio_sent, 0, EXT_NET_DRV);
	m->m_ext.ext_flags |= EXT_FLAG_AIOTX;
	job->aio_sent += m->m_len;

	sbappendstream(sb, m, 0);
	m = NULL;

	if (!(inp->inp_flags & INP_DROPPED)) {
		tp = intotcpcb(inp);
		if (moretocome)
			tp->t_flags |= TF_MORETOCOME;
		error = tp->t_fb->tfb_tcp_output(tp);
		if (moretocome)
			tp->t_flags &= ~TF_MORETOCOME;
	}

	INP_WUNLOCK(inp);
	if (sendmore)
		goto sendanother;
	sbunlock(sb);

	if (error)
		goto out;

	/*
	 * If this is a blocking socket and the request has not been
	 * fully completed, requeue it until the socket is ready
	 * again.
	 */
	if (job->aio_sent < job->uaiocb.aio_nbytes &&
	    !(so->so_state & SS_NBIO)) {
		SOCKBUF_LOCK(sb);
		if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
			SOCKBUF_UNLOCK(sb);
			error = ECANCELED;
			goto out;
		}
		TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
		return;
	}

	/*
	 * If the request will not be requeued, drop a reference on
	 * the aiotx buffer.  Any mbufs in flight should still
	 * contain a reference, but this drops the reference that the
	 * job owns while it is waiting to queue mbufs to the socket.
	 */
	free_aiotx_buffer(ab);

out:
	if (error) {
		if (ab != NULL) {
			job->aio_error = error;
			free_aiotx_buffer(ab);
		} else {
			MPASS(job->aio_sent == 0);
			aio_complete(job, -1, error);
		}
	}
	if (m != NULL)
		m_free(m);
	SOCKBUF_LOCK(sb);
}

static void
t4_aiotx_task(void *context, int pending)
{
	struct toepcb *toep = context;
	struct inpcb *inp = toep->inp;
	struct socket *so = inp->inp_socket;
	struct kaiocb *job;

	CURVNET_SET(toep->vnet);
	SOCKBUF_LOCK(&so->so_snd);
	while (!TAILQ_EMPTY(&toep->aiotx_jobq) && sowriteable(so)) {
		job = TAILQ_FIRST(&toep->aiotx_jobq);
		TAILQ_REMOVE(&toep->aiotx_jobq, job, list);
		if (!aio_clear_cancel_function(job))
			continue;

		t4_aiotx_process_job(toep, so, job);
	}
	toep->aiotx_task_active = false;
	SOCKBUF_UNLOCK(&so->so_snd);
	CURVNET_RESTORE();

	free_toepcb(toep);
}
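
/*
 * Schedule the aiotx task for this connection if it is not already
 * pending.  A hold is placed on the toepcb for the duration of the task
 * and released by t4_aiotx_task() when it finishes.  Called with the send
 * buffer lock held.
 */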
"true" : "false"); 2182 #endif 2183 if (toep->aiotx_task_active) 2184 return; 2185 toep->aiotx_task_active = true; 2186 hold_toepcb(toep); 2187 soaio_enqueue(&toep->aiotx_task); 2188 } 2189 2190 static void 2191 t4_aiotx_cancel(struct kaiocb *job) 2192 { 2193 struct aiotx_buffer *ab; 2194 struct socket *so; 2195 struct sockbuf *sb; 2196 struct tcpcb *tp; 2197 struct toepcb *toep; 2198 2199 so = job->fd_file->f_data; 2200 tp = so_sototcpcb(so); 2201 toep = tp->t_toe; 2202 MPASS(job->uaiocb.aio_lio_opcode == LIO_WRITE); 2203 sb = &so->so_snd; 2204 2205 SOCKBUF_LOCK(sb); 2206 if (!aio_cancel_cleared(job)) 2207 TAILQ_REMOVE(&toep->aiotx_jobq, job, list); 2208 SOCKBUF_UNLOCK(sb); 2209 2210 ab = job->backend1; 2211 if (ab != NULL) 2212 free_aiotx_buffer(ab); 2213 else 2214 aio_cancel(job); 2215 } 2216 2217 int 2218 t4_aio_queue_aiotx(struct socket *so, struct kaiocb *job) 2219 { 2220 struct tcpcb *tp = so_sototcpcb(so); 2221 struct toepcb *toep = tp->t_toe; 2222 struct adapter *sc = td_adapter(toep->td); 2223 2224 /* This only handles writes. */ 2225 if (job->uaiocb.aio_lio_opcode != LIO_WRITE) 2226 return (EOPNOTSUPP); 2227 2228 if (!sc->tt.tx_zcopy) 2229 return (EOPNOTSUPP); 2230 2231 SOCKBUF_LOCK(&so->so_snd); 2232 #ifdef VERBOSE_TRACES 2233 CTR2(KTR_CXGBE, "%s: queueing %p", __func__, job); 2234 #endif 2235 if (!aio_set_cancel_function(job, t4_aiotx_cancel)) 2236 panic("new job was cancelled"); 2237 TAILQ_INSERT_TAIL(&toep->aiotx_jobq, job, list); 2238 if (sowriteable(so)) 2239 t4_aiotx_queue_toep(toep); 2240 SOCKBUF_UNLOCK(&so->so_snd); 2241 return (0); 2242 } 2243 2244 void 2245 aiotx_init_toep(struct toepcb *toep) 2246 { 2247 2248 TAILQ_INIT(&toep->aiotx_jobq); 2249 TASK_INIT(&toep->aiotx_task, 0, t4_aiotx_task, toep); 2250 } 2251 #endif 2252