/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012, 2015 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ratelimit.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/aio.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sglist.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

#define IS_AIOTX_MBUF(m)						\
	((m)->m_flags & M_EXT && (m)->m_ext.ext_flags & EXT_FLAG_AIOTX)

static void t4_aiotx_cancel(struct kaiocb *job);
static void t4_aiotx_queue_toep(struct toepcb *toep);

static size_t
aiotx_mbuf_pgoff(struct mbuf *m)
{
	struct aiotx_buffer *ab;

	MPASS(IS_AIOTX_MBUF(m));
	ab = m->m_ext.ext_arg1;
	return ((ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) % PAGE_SIZE);
}

static vm_page_t *
aiotx_mbuf_pages(struct mbuf *m)
{
	struct aiotx_buffer *ab;
	int npages;

	MPASS(IS_AIOTX_MBUF(m));
	ab = m->m_ext.ext_arg1;
	npages = (ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) / PAGE_SIZE;
	return (ab->ps.pages + npages);
}

void
send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
{
	struct wrqe *wr;
	struct fw_flowc_wr *flowc;
	unsigned int nparams = ftxp ?
8 : 6, flowclen; 110 struct vi_info *vi = toep->vi; 111 struct port_info *pi = vi->pi; 112 struct adapter *sc = pi->adapter; 113 unsigned int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN; 114 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 115 116 KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT), 117 ("%s: flowc for tid %u sent already", __func__, toep->tid)); 118 119 flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval); 120 121 wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq); 122 if (wr == NULL) { 123 /* XXX */ 124 panic("%s: allocation failure.", __func__); 125 } 126 flowc = wrtod(wr); 127 memset(flowc, 0, wr->wr_len); 128 129 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 130 V_FW_FLOWC_WR_NPARAMS(nparams)); 131 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) | 132 V_FW_WR_FLOWID(toep->tid)); 133 134 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 135 flowc->mnemval[0].val = htobe32(pfvf); 136 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 137 flowc->mnemval[1].val = htobe32(pi->tx_chan); 138 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 139 flowc->mnemval[2].val = htobe32(pi->tx_chan); 140 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 141 flowc->mnemval[3].val = htobe32(toep->ofld_rxq->iq.abs_id); 142 if (ftxp) { 143 uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf); 144 145 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; 146 flowc->mnemval[4].val = htobe32(ftxp->snd_nxt); 147 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 148 flowc->mnemval[5].val = htobe32(ftxp->rcv_nxt); 149 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 150 flowc->mnemval[6].val = htobe32(sndbuf); 151 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 152 flowc->mnemval[7].val = htobe32(ftxp->mss); 153 154 CTR6(KTR_CXGBE, 155 "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x", 156 __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt, 157 ftxp->rcv_nxt); 158 } else { 159 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF; 160 flowc->mnemval[4].val = htobe32(512); 161 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS; 162 flowc->mnemval[5].val = htobe32(512); 163 164 CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid); 165 } 166 167 txsd->tx_credits = howmany(flowclen, 16); 168 txsd->plen = 0; 169 KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0, 170 ("%s: not enough credits (%d)", __func__, toep->tx_credits)); 171 toep->tx_credits -= txsd->tx_credits; 172 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) 173 toep->txsd_pidx = 0; 174 toep->txsd_avail--; 175 176 toep->flags |= TPF_FLOWC_WR_SENT; 177 t4_wrq_tx(sc, wr); 178 } 179 180 #ifdef RATELIMIT 181 /* 182 * Input is Bytes/second (so_max_pacing-rate), chip counts in Kilobits/second. 
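 *
 * Purely for illustration (hypothetical numbers): a pacing rate of
 * 1,250,000 bytes/sec converts to 1,250,000 * 8 / 1000 = 10,000 Kbps,
 * i.e. a 10 Mbps traffic class, while a rate of 0 unbinds the tid from
 * any class.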
183 */ 184 static int 185 update_tx_rate_limit(struct adapter *sc, struct toepcb *toep, u_int Bps) 186 { 187 int tc_idx, rc; 188 const u_int kbps = (u_int) (uint64_t)Bps * 8ULL / 1000; 189 const int port_id = toep->vi->pi->port_id; 190 191 CTR3(KTR_CXGBE, "%s: tid %u, rate %uKbps", __func__, toep->tid, kbps); 192 193 if (kbps == 0) { 194 /* unbind */ 195 tc_idx = -1; 196 } else { 197 rc = t4_reserve_cl_rl_kbps(sc, port_id, kbps, &tc_idx); 198 if (rc != 0) 199 return (rc); 200 MPASS(tc_idx >= 0 && tc_idx < sc->chip_params->nsched_cls); 201 } 202 203 if (toep->tc_idx != tc_idx) { 204 struct wrqe *wr; 205 struct fw_flowc_wr *flowc; 206 int nparams = 1, flowclen, flowclen16; 207 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 208 209 flowclen = sizeof(*flowc) + nparams * sizeof(struct 210 fw_flowc_mnemval); 211 flowclen16 = howmany(flowclen, 16); 212 if (toep->tx_credits < flowclen16 || toep->txsd_avail == 0 || 213 (wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq)) == NULL) { 214 if (tc_idx >= 0) 215 t4_release_cl_rl_kbps(sc, port_id, tc_idx); 216 return (ENOMEM); 217 } 218 219 flowc = wrtod(wr); 220 memset(flowc, 0, wr->wr_len); 221 222 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 223 V_FW_FLOWC_WR_NPARAMS(nparams)); 224 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(flowclen16) | 225 V_FW_WR_FLOWID(toep->tid)); 226 227 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; 228 if (tc_idx == -1) 229 flowc->mnemval[0].val = htobe32(0xff); 230 else 231 flowc->mnemval[0].val = htobe32(tc_idx); 232 233 txsd->tx_credits = flowclen16; 234 txsd->plen = 0; 235 toep->tx_credits -= txsd->tx_credits; 236 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) 237 toep->txsd_pidx = 0; 238 toep->txsd_avail--; 239 t4_wrq_tx(sc, wr); 240 } 241 242 if (toep->tc_idx >= 0) 243 t4_release_cl_rl_kbps(sc, port_id, toep->tc_idx); 244 toep->tc_idx = tc_idx; 245 246 return (0); 247 } 248 #endif 249 250 void 251 send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt) 252 { 253 struct wrqe *wr; 254 struct cpl_abort_req *req; 255 int tid = toep->tid; 256 struct inpcb *inp = toep->inp; 257 struct tcpcb *tp = intotcpcb(inp); /* don't use if INP_DROPPED */ 258 259 INP_WLOCK_ASSERT(inp); 260 261 CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s", 262 __func__, toep->tid, 263 inp->inp_flags & INP_DROPPED ? "inp dropped" : 264 tcpstates[tp->t_state], 265 toep->flags, inp->inp_flags, 266 toep->flags & TPF_ABORT_SHUTDOWN ? 267 " (abort already in progress)" : ""); 268 269 if (toep->flags & TPF_ABORT_SHUTDOWN) 270 return; /* abort already in progress */ 271 272 toep->flags |= TPF_ABORT_SHUTDOWN; 273 274 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 275 ("%s: flowc_wr not sent for tid %d.", __func__, tid)); 276 277 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 278 if (wr == NULL) { 279 /* XXX */ 280 panic("%s: allocation failure.", __func__); 281 } 282 req = wrtod(wr); 283 284 INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid); 285 if (inp->inp_flags & INP_DROPPED) 286 req->rsvd0 = htobe32(snd_nxt); 287 else 288 req->rsvd0 = htobe32(tp->snd_nxt); 289 req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT); 290 req->cmd = CPL_ABORT_SEND_RST; 291 292 /* 293 * XXX: What's the correct way to tell that the inp hasn't been detached 294 * from its socket? Should I even be flushing the snd buffer here? 295 */ 296 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 297 struct socket *so = inp->inp_socket; 298 299 if (so != NULL) /* because I'm not sure. 
See comment above */ 300 sbflush(&so->so_snd); 301 } 302 303 t4_l2t_send(sc, wr, toep->l2te); 304 } 305 306 /* 307 * Called when a connection is established to translate the TCP options 308 * reported by HW to FreeBSD's native format. 309 */ 310 static void 311 assign_rxopt(struct tcpcb *tp, unsigned int opt) 312 { 313 struct toepcb *toep = tp->t_toe; 314 struct inpcb *inp = tp->t_inpcb; 315 struct adapter *sc = td_adapter(toep->td); 316 int n; 317 318 INP_LOCK_ASSERT(inp); 319 320 if (inp->inp_inc.inc_flags & INC_ISIPV6) 321 n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 322 else 323 n = sizeof(struct ip) + sizeof(struct tcphdr); 324 if (V_tcp_do_rfc1323) 325 n += TCPOLEN_TSTAMP_APPA; 326 tp->t_maxseg = sc->params.mtus[G_TCPOPT_MSS(opt)] - n; 327 328 CTR4(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u)", __func__, toep->tid, 329 G_TCPOPT_MSS(opt), sc->params.mtus[G_TCPOPT_MSS(opt)]); 330 331 if (G_TCPOPT_TSTAMP(opt)) { 332 tp->t_flags |= TF_RCVD_TSTMP; /* timestamps ok */ 333 tp->ts_recent = 0; /* hmmm */ 334 tp->ts_recent_age = tcp_ts_getticks(); 335 } 336 337 if (G_TCPOPT_SACK(opt)) 338 tp->t_flags |= TF_SACK_PERMIT; /* should already be set */ 339 else 340 tp->t_flags &= ~TF_SACK_PERMIT; /* sack disallowed by peer */ 341 342 if (G_TCPOPT_WSCALE_OK(opt)) 343 tp->t_flags |= TF_RCVD_SCALE; 344 345 /* Doing window scaling? */ 346 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 347 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 348 tp->rcv_scale = tp->request_r_scale; 349 tp->snd_scale = G_TCPOPT_SND_WSCALE(opt); 350 } 351 } 352 353 /* 354 * Completes some final bits of initialization for just established connections 355 * and changes their state to TCPS_ESTABLISHED. 356 * 357 * The ISNs are from after the exchange of SYNs. i.e., the true ISN + 1. 358 */ 359 void 360 make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn, 361 uint16_t opt) 362 { 363 struct inpcb *inp = toep->inp; 364 struct socket *so = inp->inp_socket; 365 struct tcpcb *tp = intotcpcb(inp); 366 long bufsize; 367 uint32_t iss = be32toh(snd_isn) - 1; /* true ISS */ 368 uint32_t irs = be32toh(rcv_isn) - 1; /* true IRS */ 369 uint16_t tcpopt = be16toh(opt); 370 struct flowc_tx_params ftxp; 371 372 INP_WLOCK_ASSERT(inp); 373 KASSERT(tp->t_state == TCPS_SYN_SENT || 374 tp->t_state == TCPS_SYN_RECEIVED, 375 ("%s: TCP state %s", __func__, tcpstates[tp->t_state])); 376 377 CTR6(KTR_CXGBE, "%s: tid %d, so %p, inp %p, tp %p, toep %p", 378 __func__, toep->tid, so, inp, tp, toep); 379 380 tp->t_state = TCPS_ESTABLISHED; 381 tp->t_starttime = ticks; 382 TCPSTAT_INC(tcps_connects); 383 384 tp->irs = irs; 385 tcp_rcvseqinit(tp); 386 tp->rcv_wnd = toep->rx_credits << 10; 387 tp->rcv_adv += tp->rcv_wnd; 388 tp->last_ack_sent = tp->rcv_nxt; 389 390 /* 391 * If we were unable to send all rx credits via opt0, save the remainder 392 * in rx_credits so that they can be handed over with the next credit 393 * update. 
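	 *
	 * A hypothetical example: if select_rcv_wnd() works out to 256KB but
	 * opt0 could only advertise a 208KB window, the remaining 48KB stays
	 * in toep->rx_credits and is returned to the chip with a later
	 * CPL_RX_DATA_ACK (see send_rx_credits()).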
394 */ 395 SOCKBUF_LOCK(&so->so_rcv); 396 bufsize = select_rcv_wnd(so); 397 SOCKBUF_UNLOCK(&so->so_rcv); 398 toep->rx_credits = bufsize - tp->rcv_wnd; 399 400 tp->iss = iss; 401 tcp_sendseqinit(tp); 402 tp->snd_una = iss + 1; 403 tp->snd_nxt = iss + 1; 404 tp->snd_max = iss + 1; 405 406 assign_rxopt(tp, tcpopt); 407 408 SOCKBUF_LOCK(&so->so_snd); 409 if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf) 410 bufsize = V_tcp_autosndbuf_max; 411 else 412 bufsize = sbspace(&so->so_snd); 413 SOCKBUF_UNLOCK(&so->so_snd); 414 415 ftxp.snd_nxt = tp->snd_nxt; 416 ftxp.rcv_nxt = tp->rcv_nxt; 417 ftxp.snd_space = bufsize; 418 ftxp.mss = tp->t_maxseg; 419 send_flowc_wr(toep, &ftxp); 420 421 soisconnected(so); 422 } 423 424 static int 425 send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits) 426 { 427 struct wrqe *wr; 428 struct cpl_rx_data_ack *req; 429 uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1); 430 431 KASSERT(credits >= 0, ("%s: %d credits", __func__, credits)); 432 433 wr = alloc_wrqe(sizeof(*req), toep->ctrlq); 434 if (wr == NULL) 435 return (0); 436 req = wrtod(wr); 437 438 INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid); 439 req->credit_dack = htobe32(dack | V_RX_CREDITS(credits)); 440 441 t4_wrq_tx(sc, wr); 442 return (credits); 443 } 444 445 void 446 t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp) 447 { 448 struct adapter *sc = tod->tod_softc; 449 struct inpcb *inp = tp->t_inpcb; 450 struct socket *so = inp->inp_socket; 451 struct sockbuf *sb = &so->so_rcv; 452 struct toepcb *toep = tp->t_toe; 453 int credits; 454 455 INP_WLOCK_ASSERT(inp); 456 457 SOCKBUF_LOCK_ASSERT(sb); 458 KASSERT(toep->sb_cc >= sbused(sb), 459 ("%s: sb %p has more data (%d) than last time (%d).", 460 __func__, sb, sbused(sb), toep->sb_cc)); 461 462 toep->rx_credits += toep->sb_cc - sbused(sb); 463 toep->sb_cc = sbused(sb); 464 465 if (toep->rx_credits > 0 && 466 (tp->rcv_wnd <= 32 * 1024 || toep->rx_credits >= 64 * 1024 || 467 (toep->rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) || 468 toep->sb_cc + tp->rcv_wnd < sb->sb_lowat)) { 469 470 credits = send_rx_credits(sc, toep, toep->rx_credits); 471 toep->rx_credits -= credits; 472 tp->rcv_wnd += credits; 473 tp->rcv_adv += credits; 474 } 475 } 476 477 void 478 t4_rcvd(struct toedev *tod, struct tcpcb *tp) 479 { 480 struct inpcb *inp = tp->t_inpcb; 481 struct socket *so = inp->inp_socket; 482 struct sockbuf *sb = &so->so_rcv; 483 484 SOCKBUF_LOCK(sb); 485 t4_rcvd_locked(tod, tp); 486 SOCKBUF_UNLOCK(sb); 487 } 488 489 /* 490 * Close a connection by sending a CPL_CLOSE_CON_REQ message. 491 */ 492 static int 493 close_conn(struct adapter *sc, struct toepcb *toep) 494 { 495 struct wrqe *wr; 496 struct cpl_close_con_req *req; 497 unsigned int tid = toep->tid; 498 499 CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid, 500 toep->flags & TPF_FIN_SENT ? 
", IGNORED" : ""); 501 502 if (toep->flags & TPF_FIN_SENT) 503 return (0); 504 505 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 506 ("%s: flowc_wr not sent for tid %u.", __func__, tid)); 507 508 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 509 if (wr == NULL) { 510 /* XXX */ 511 panic("%s: allocation failure.", __func__); 512 } 513 req = wrtod(wr); 514 515 req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) | 516 V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr))); 517 req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) | 518 V_FW_WR_FLOWID(tid)); 519 req->wr.wr_lo = cpu_to_be64(0); 520 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); 521 req->rsvd = 0; 522 523 toep->flags |= TPF_FIN_SENT; 524 toep->flags &= ~TPF_SEND_FIN; 525 t4_l2t_send(sc, wr, toep->l2te); 526 527 return (0); 528 } 529 530 #define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16) 531 #define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16)) 532 533 /* Maximum amount of immediate data we could stuff in a WR */ 534 static inline int 535 max_imm_payload(int tx_credits) 536 { 537 const int n = 2; /* Use only up to 2 desc for imm. data WR */ 538 539 KASSERT(tx_credits >= 0 && 540 tx_credits <= MAX_OFLD_TX_CREDITS, 541 ("%s: %d credits", __func__, tx_credits)); 542 543 if (tx_credits < MIN_OFLD_TX_CREDITS) 544 return (0); 545 546 if (tx_credits >= (n * EQ_ESIZE) / 16) 547 return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr)); 548 else 549 return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr)); 550 } 551 552 /* Maximum number of SGL entries we could stuff in a WR */ 553 static inline int 554 max_dsgl_nsegs(int tx_credits) 555 { 556 int nseg = 1; /* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */ 557 int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS; 558 559 KASSERT(tx_credits >= 0 && 560 tx_credits <= MAX_OFLD_TX_CREDITS, 561 ("%s: %d credits", __func__, tx_credits)); 562 563 if (tx_credits < MIN_OFLD_TX_CREDITS) 564 return (0); 565 566 nseg += 2 * (sge_pair_credits * 16 / 24); 567 if ((sge_pair_credits * 16) % 24 == 16) 568 nseg++; 569 570 return (nseg); 571 } 572 573 static inline void 574 write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen, 575 unsigned int plen, uint8_t credits, int shove, int ulp_submode, int txalign) 576 { 577 struct fw_ofld_tx_data_wr *txwr = dst; 578 579 txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) | 580 V_FW_WR_IMMDLEN(immdlen)); 581 txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) | 582 V_FW_WR_LEN16(credits)); 583 txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(toep->ulp_mode) | 584 V_TX_ULP_SUBMODE(ulp_submode) | V_TX_URG(0) | V_TX_SHOVE(shove)); 585 txwr->plen = htobe32(plen); 586 587 if (txalign > 0) { 588 struct tcpcb *tp = intotcpcb(toep->inp); 589 590 if (plen < 2 * tp->t_maxseg || is_10G_port(toep->vi->pi)) 591 txwr->lsodisable_to_flags |= 592 htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE); 593 else 594 txwr->lsodisable_to_flags |= 595 htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD | 596 (tp->t_flags & TF_NODELAY ? 0 : 597 F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE)); 598 } 599 } 600 601 /* 602 * Generate a DSGL from a starting mbuf. The total number of segments and the 603 * maximum segments in any one mbuf are provided. 
604 */ 605 static void 606 write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n) 607 { 608 struct mbuf *m; 609 struct ulptx_sgl *usgl = dst; 610 int i, j, rc; 611 struct sglist sg; 612 struct sglist_seg segs[n]; 613 614 KASSERT(nsegs > 0, ("%s: nsegs 0", __func__)); 615 616 sglist_init(&sg, n, segs); 617 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 618 V_ULPTX_NSGE(nsegs)); 619 620 i = -1; 621 for (m = start; m != stop; m = m->m_next) { 622 if (IS_AIOTX_MBUF(m)) 623 rc = sglist_append_vmpages(&sg, aiotx_mbuf_pages(m), 624 aiotx_mbuf_pgoff(m), m->m_len); 625 else 626 rc = sglist_append(&sg, mtod(m, void *), m->m_len); 627 if (__predict_false(rc != 0)) 628 panic("%s: sglist_append %d", __func__, rc); 629 630 for (j = 0; j < sg.sg_nseg; i++, j++) { 631 if (i < 0) { 632 usgl->len0 = htobe32(segs[j].ss_len); 633 usgl->addr0 = htobe64(segs[j].ss_paddr); 634 } else { 635 usgl->sge[i / 2].len[i & 1] = 636 htobe32(segs[j].ss_len); 637 usgl->sge[i / 2].addr[i & 1] = 638 htobe64(segs[j].ss_paddr); 639 } 640 #ifdef INVARIANTS 641 nsegs--; 642 #endif 643 } 644 sglist_reset(&sg); 645 } 646 if (i & 1) 647 usgl->sge[i / 2].len[1] = htobe32(0); 648 KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p", 649 __func__, nsegs, start, stop)); 650 } 651 652 /* 653 * Max number of SGL entries an offload tx work request can have. This is 41 654 * (1 + 40) for a full 512B work request. 655 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40) 656 */ 657 #define OFLD_SGL_LEN (41) 658 659 /* 660 * Send data and/or a FIN to the peer. 661 * 662 * The socket's so_snd buffer consists of a stream of data starting with sb_mb 663 * and linked together with m_next. sb_sndptr, if set, is the last mbuf that 664 * was transmitted. 665 * 666 * drop indicates the number of bytes that should be dropped from the head of 667 * the send buffer. It is an optimization that lets do_fw4_ack avoid creating 668 * contention on the send buffer lock (before this change it used to do 669 * sowwakeup and then t4_push_frames right after that when recovering from tx 670 * stalls). When drop is set this function MUST drop the bytes and wake up any 671 * writers. 
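 *
 * (In this file, do_fw4_ack() is the caller that passes a non-zero drop: it
 * hands in the number of payload bytes just acked when it resumes a suspended
 * tid, so the trimming happens under the sockbuf lock taken here anyway.)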
672 */ 673 void 674 t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop) 675 { 676 struct mbuf *sndptr, *m, *sb_sndptr; 677 struct fw_ofld_tx_data_wr *txwr; 678 struct wrqe *wr; 679 u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf; 680 struct inpcb *inp = toep->inp; 681 struct tcpcb *tp = intotcpcb(inp); 682 struct socket *so = inp->inp_socket; 683 struct sockbuf *sb = &so->so_snd; 684 int tx_credits, shove, compl, sowwakeup; 685 struct ofld_tx_sdesc *txsd; 686 bool aiotx_mbuf_seen; 687 688 INP_WLOCK_ASSERT(inp); 689 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 690 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); 691 692 KASSERT(toep->ulp_mode == ULP_MODE_NONE || 693 toep->ulp_mode == ULP_MODE_TCPDDP || 694 toep->ulp_mode == ULP_MODE_RDMA, 695 ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep)); 696 697 #ifdef VERBOSE_TRACES 698 CTR4(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d", 699 __func__, toep->tid, toep->flags, tp->t_flags); 700 #endif 701 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) 702 return; 703 704 #ifdef RATELIMIT 705 if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) && 706 (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) { 707 inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED; 708 } 709 #endif 710 711 /* 712 * This function doesn't resume by itself. Someone else must clear the 713 * flag and call this function. 714 */ 715 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { 716 KASSERT(drop == 0, 717 ("%s: drop (%d) != 0 but tx is suspended", __func__, drop)); 718 return; 719 } 720 721 txsd = &toep->txsd[toep->txsd_pidx]; 722 do { 723 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); 724 max_imm = max_imm_payload(tx_credits); 725 max_nsegs = max_dsgl_nsegs(tx_credits); 726 727 SOCKBUF_LOCK(sb); 728 sowwakeup = drop; 729 if (drop) { 730 sbdrop_locked(sb, drop); 731 drop = 0; 732 } 733 sb_sndptr = sb->sb_sndptr; 734 sndptr = sb_sndptr ? 
sb_sndptr->m_next : sb->sb_mb; 735 plen = 0; 736 nsegs = 0; 737 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ 738 aiotx_mbuf_seen = false; 739 for (m = sndptr; m != NULL; m = m->m_next) { 740 int n; 741 742 if (IS_AIOTX_MBUF(m)) 743 n = sglist_count_vmpages(aiotx_mbuf_pages(m), 744 aiotx_mbuf_pgoff(m), m->m_len); 745 else 746 n = sglist_count(mtod(m, void *), m->m_len); 747 748 nsegs += n; 749 plen += m->m_len; 750 751 /* This mbuf sent us _over_ the nsegs limit, back out */ 752 if (plen > max_imm && nsegs > max_nsegs) { 753 nsegs -= n; 754 plen -= m->m_len; 755 if (plen == 0) { 756 /* Too few credits */ 757 toep->flags |= TPF_TX_SUSPENDED; 758 if (sowwakeup) { 759 if (!TAILQ_EMPTY( 760 &toep->aiotx_jobq)) 761 t4_aiotx_queue_toep( 762 toep); 763 sowwakeup_locked(so); 764 } else 765 SOCKBUF_UNLOCK(sb); 766 SOCKBUF_UNLOCK_ASSERT(sb); 767 return; 768 } 769 break; 770 } 771 772 if (IS_AIOTX_MBUF(m)) 773 aiotx_mbuf_seen = true; 774 if (max_nsegs_1mbuf < n) 775 max_nsegs_1mbuf = n; 776 sb_sndptr = m; /* new sb->sb_sndptr if all goes well */ 777 778 /* This mbuf put us right at the max_nsegs limit */ 779 if (plen > max_imm && nsegs == max_nsegs) { 780 m = m->m_next; 781 break; 782 } 783 } 784 785 if (sbused(sb) > sb->sb_hiwat * 5 / 8 && 786 toep->plen_nocompl + plen >= sb->sb_hiwat / 4) 787 compl = 1; 788 else 789 compl = 0; 790 791 if (sb->sb_flags & SB_AUTOSIZE && 792 V_tcp_do_autosndbuf && 793 sb->sb_hiwat < V_tcp_autosndbuf_max && 794 sbused(sb) >= sb->sb_hiwat * 7 / 8) { 795 int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc, 796 V_tcp_autosndbuf_max); 797 798 if (!sbreserve_locked(sb, newsize, so, NULL)) 799 sb->sb_flags &= ~SB_AUTOSIZE; 800 else 801 sowwakeup = 1; /* room available */ 802 } 803 if (sowwakeup) { 804 if (!TAILQ_EMPTY(&toep->aiotx_jobq)) 805 t4_aiotx_queue_toep(toep); 806 sowwakeup_locked(so); 807 } else 808 SOCKBUF_UNLOCK(sb); 809 SOCKBUF_UNLOCK_ASSERT(sb); 810 811 /* nothing to send */ 812 if (plen == 0) { 813 KASSERT(m == NULL, 814 ("%s: nothing to send, but m != NULL", __func__)); 815 break; 816 } 817 818 if (__predict_false(toep->flags & TPF_FIN_SENT)) 819 panic("%s: excess tx.", __func__); 820 821 shove = m == NULL && !(tp->t_flags & TF_MORETOCOME); 822 if (plen <= max_imm && !aiotx_mbuf_seen) { 823 824 /* Immediate data tx */ 825 826 wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), 827 toep->ofld_txq); 828 if (wr == NULL) { 829 /* XXX: how will we recover from this? */ 830 toep->flags |= TPF_TX_SUSPENDED; 831 return; 832 } 833 txwr = wrtod(wr); 834 credits = howmany(wr->wr_len, 16); 835 write_tx_wr(txwr, toep, plen, plen, credits, shove, 0, 836 sc->tt.tx_align); 837 m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); 838 nsegs = 0; 839 } else { 840 int wr_len; 841 842 /* DSGL tx */ 843 844 wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + 845 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 846 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq); 847 if (wr == NULL) { 848 /* XXX: how will we recover from this? 
*/ 849 toep->flags |= TPF_TX_SUSPENDED; 850 return; 851 } 852 txwr = wrtod(wr); 853 credits = howmany(wr_len, 16); 854 write_tx_wr(txwr, toep, 0, plen, credits, shove, 0, 855 sc->tt.tx_align); 856 write_tx_sgl(txwr + 1, sndptr, m, nsegs, 857 max_nsegs_1mbuf); 858 if (wr_len & 0xf) { 859 uint64_t *pad = (uint64_t *) 860 ((uintptr_t)txwr + wr_len); 861 *pad = 0; 862 } 863 } 864 865 KASSERT(toep->tx_credits >= credits, 866 ("%s: not enough credits", __func__)); 867 868 toep->tx_credits -= credits; 869 toep->tx_nocompl += credits; 870 toep->plen_nocompl += plen; 871 if (toep->tx_credits <= toep->tx_total * 3 / 8 && 872 toep->tx_nocompl >= toep->tx_total / 4) 873 compl = 1; 874 875 if (compl || toep->ulp_mode == ULP_MODE_RDMA) { 876 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); 877 toep->tx_nocompl = 0; 878 toep->plen_nocompl = 0; 879 } 880 881 tp->snd_nxt += plen; 882 tp->snd_max += plen; 883 884 SOCKBUF_LOCK(sb); 885 KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__)); 886 sb->sb_sndptr = sb_sndptr; 887 SOCKBUF_UNLOCK(sb); 888 889 toep->flags |= TPF_TX_DATA_SENT; 890 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) 891 toep->flags |= TPF_TX_SUSPENDED; 892 893 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 894 txsd->plen = plen; 895 txsd->tx_credits = credits; 896 txsd++; 897 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 898 toep->txsd_pidx = 0; 899 txsd = &toep->txsd[0]; 900 } 901 toep->txsd_avail--; 902 903 t4_l2t_send(sc, wr, toep->l2te); 904 } while (m != NULL); 905 906 /* Send a FIN if requested, but only if there's no more data to send */ 907 if (m == NULL && toep->flags & TPF_SEND_FIN) 908 close_conn(sc, toep); 909 } 910 911 static inline void 912 rqdrop_locked(struct mbufq *q, int plen) 913 { 914 struct mbuf *m; 915 916 while (plen > 0) { 917 m = mbufq_dequeue(q); 918 919 /* Too many credits. */ 920 MPASS(m != NULL); 921 M_ASSERTPKTHDR(m); 922 923 /* Partial credits. */ 924 MPASS(plen >= m->m_pkthdr.len); 925 926 plen -= m->m_pkthdr.len; 927 m_freem(m); 928 } 929 } 930 931 void 932 t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop) 933 { 934 struct mbuf *sndptr, *m; 935 struct fw_ofld_tx_data_wr *txwr; 936 struct wrqe *wr; 937 u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf; 938 u_int adjusted_plen, ulp_submode; 939 struct inpcb *inp = toep->inp; 940 struct tcpcb *tp = intotcpcb(inp); 941 int tx_credits, shove; 942 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 943 struct mbufq *pduq = &toep->ulp_pduq; 944 static const u_int ulp_extra_len[] = {0, 4, 4, 8}; 945 946 INP_WLOCK_ASSERT(inp); 947 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 948 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); 949 KASSERT(toep->ulp_mode == ULP_MODE_ISCSI, 950 ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep)); 951 952 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) 953 return; 954 955 /* 956 * This function doesn't resume by itself. Someone else must clear the 957 * flag and call this function. 
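	 *
	 * (That someone is do_fw4_ack(): once at least a quarter of the tx
	 * credits are back it clears TPF_TX_SUSPENDED and calls this function
	 * again with the acked length as 'drop'.)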
958 */ 959 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { 960 KASSERT(drop == 0, 961 ("%s: drop (%d) != 0 but tx is suspended", __func__, drop)); 962 return; 963 } 964 965 if (drop) 966 rqdrop_locked(&toep->ulp_pdu_reclaimq, drop); 967 968 while ((sndptr = mbufq_first(pduq)) != NULL) { 969 M_ASSERTPKTHDR(sndptr); 970 971 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); 972 max_imm = max_imm_payload(tx_credits); 973 max_nsegs = max_dsgl_nsegs(tx_credits); 974 975 plen = 0; 976 nsegs = 0; 977 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ 978 for (m = sndptr; m != NULL; m = m->m_next) { 979 int n = sglist_count(mtod(m, void *), m->m_len); 980 981 nsegs += n; 982 plen += m->m_len; 983 984 /* 985 * This mbuf would send us _over_ the nsegs limit. 986 * Suspend tx because the PDU can't be sent out. 987 */ 988 if (plen > max_imm && nsegs > max_nsegs) { 989 toep->flags |= TPF_TX_SUSPENDED; 990 return; 991 } 992 993 if (max_nsegs_1mbuf < n) 994 max_nsegs_1mbuf = n; 995 } 996 997 if (__predict_false(toep->flags & TPF_FIN_SENT)) 998 panic("%s: excess tx.", __func__); 999 1000 /* 1001 * We have a PDU to send. All of it goes out in one WR so 'm' 1002 * is NULL. A PDU's length is always a multiple of 4. 1003 */ 1004 MPASS(m == NULL); 1005 MPASS((plen & 3) == 0); 1006 MPASS(sndptr->m_pkthdr.len == plen); 1007 1008 shove = !(tp->t_flags & TF_MORETOCOME); 1009 ulp_submode = mbuf_ulp_submode(sndptr); 1010 MPASS(ulp_submode < nitems(ulp_extra_len)); 1011 1012 /* 1013 * plen doesn't include header and data digests, which are 1014 * generated and inserted in the right places by the TOE, but 1015 * they do occupy TCP sequence space and need to be accounted 1016 * for. 1017 */ 1018 adjusted_plen = plen + ulp_extra_len[ulp_submode]; 1019 if (plen <= max_imm) { 1020 1021 /* Immediate data tx */ 1022 1023 wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), 1024 toep->ofld_txq); 1025 if (wr == NULL) { 1026 /* XXX: how will we recover from this? */ 1027 toep->flags |= TPF_TX_SUSPENDED; 1028 return; 1029 } 1030 txwr = wrtod(wr); 1031 credits = howmany(wr->wr_len, 16); 1032 write_tx_wr(txwr, toep, plen, adjusted_plen, credits, 1033 shove, ulp_submode, sc->tt.tx_align); 1034 m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); 1035 nsegs = 0; 1036 } else { 1037 int wr_len; 1038 1039 /* DSGL tx */ 1040 wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + 1041 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 1042 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq); 1043 if (wr == NULL) { 1044 /* XXX: how will we recover from this? 
*/ 1045 toep->flags |= TPF_TX_SUSPENDED; 1046 return; 1047 } 1048 txwr = wrtod(wr); 1049 credits = howmany(wr_len, 16); 1050 write_tx_wr(txwr, toep, 0, adjusted_plen, credits, 1051 shove, ulp_submode, sc->tt.tx_align); 1052 write_tx_sgl(txwr + 1, sndptr, m, nsegs, 1053 max_nsegs_1mbuf); 1054 if (wr_len & 0xf) { 1055 uint64_t *pad = (uint64_t *) 1056 ((uintptr_t)txwr + wr_len); 1057 *pad = 0; 1058 } 1059 } 1060 1061 KASSERT(toep->tx_credits >= credits, 1062 ("%s: not enough credits", __func__)); 1063 1064 m = mbufq_dequeue(pduq); 1065 MPASS(m == sndptr); 1066 mbufq_enqueue(&toep->ulp_pdu_reclaimq, m); 1067 1068 toep->tx_credits -= credits; 1069 toep->tx_nocompl += credits; 1070 toep->plen_nocompl += plen; 1071 if (toep->tx_credits <= toep->tx_total * 3 / 8 && 1072 toep->tx_nocompl >= toep->tx_total / 4) { 1073 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); 1074 toep->tx_nocompl = 0; 1075 toep->plen_nocompl = 0; 1076 } 1077 1078 tp->snd_nxt += adjusted_plen; 1079 tp->snd_max += adjusted_plen; 1080 1081 toep->flags |= TPF_TX_DATA_SENT; 1082 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) 1083 toep->flags |= TPF_TX_SUSPENDED; 1084 1085 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 1086 txsd->plen = plen; 1087 txsd->tx_credits = credits; 1088 txsd++; 1089 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 1090 toep->txsd_pidx = 0; 1091 txsd = &toep->txsd[0]; 1092 } 1093 toep->txsd_avail--; 1094 1095 t4_l2t_send(sc, wr, toep->l2te); 1096 } 1097 1098 /* Send a FIN if requested, but only if there are no more PDUs to send */ 1099 if (mbufq_first(pduq) == NULL && toep->flags & TPF_SEND_FIN) 1100 close_conn(sc, toep); 1101 } 1102 1103 int 1104 t4_tod_output(struct toedev *tod, struct tcpcb *tp) 1105 { 1106 struct adapter *sc = tod->tod_softc; 1107 #ifdef INVARIANTS 1108 struct inpcb *inp = tp->t_inpcb; 1109 #endif 1110 struct toepcb *toep = tp->t_toe; 1111 1112 INP_WLOCK_ASSERT(inp); 1113 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1114 ("%s: inp %p dropped.", __func__, inp)); 1115 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1116 1117 if (toep->ulp_mode == ULP_MODE_ISCSI) 1118 t4_push_pdus(sc, toep, 0); 1119 else 1120 t4_push_frames(sc, toep, 0); 1121 1122 return (0); 1123 } 1124 1125 int 1126 t4_send_fin(struct toedev *tod, struct tcpcb *tp) 1127 { 1128 struct adapter *sc = tod->tod_softc; 1129 #ifdef INVARIANTS 1130 struct inpcb *inp = tp->t_inpcb; 1131 #endif 1132 struct toepcb *toep = tp->t_toe; 1133 1134 INP_WLOCK_ASSERT(inp); 1135 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1136 ("%s: inp %p dropped.", __func__, inp)); 1137 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1138 1139 toep->flags |= TPF_SEND_FIN; 1140 if (tp->t_state >= TCPS_ESTABLISHED) { 1141 if (toep->ulp_mode == ULP_MODE_ISCSI) 1142 t4_push_pdus(sc, toep, 0); 1143 else 1144 t4_push_frames(sc, toep, 0); 1145 } 1146 1147 return (0); 1148 } 1149 1150 int 1151 t4_send_rst(struct toedev *tod, struct tcpcb *tp) 1152 { 1153 struct adapter *sc = tod->tod_softc; 1154 #if defined(INVARIANTS) 1155 struct inpcb *inp = tp->t_inpcb; 1156 #endif 1157 struct toepcb *toep = tp->t_toe; 1158 1159 INP_WLOCK_ASSERT(inp); 1160 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1161 ("%s: inp %p dropped.", __func__, inp)); 1162 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1163 1164 /* hmmmm */ 1165 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 1166 ("%s: flowc for tid %u [%s] not sent already", 1167 __func__, toep->tid, tcpstates[tp->t_state])); 1168 1169 send_reset(sc, toep, 0); 1170 return (0); 1171 } 1172 1173 
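
/*
 * For reference, a worked example (illustrative numbers only) of the DSGL
 * work request sizing used by t4_push_frames() and t4_push_pdus() above.
 * With nsegs = 9, one segment rides in the ulptx_sgl itself and the other
 * eight fill four 24B ulptx_sge_pairs, so
 *
 *   wr_len = sizeof(fw_ofld_tx_data_wr) + sizeof(ulptx_sgl) +
 *            ((3 * (9 - 1)) / 2 + ((9 - 1) & 1)) * 8
 *          = 16 + 16 + 96 = 128 bytes,
 *
 * which costs howmany(128, 16) = 8 of the connection's tx credits.
 */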
/* 1174 * Peer has sent us a FIN. 1175 */ 1176 static int 1177 do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1178 { 1179 struct adapter *sc = iq->adapter; 1180 const struct cpl_peer_close *cpl = (const void *)(rss + 1); 1181 unsigned int tid = GET_TID(cpl); 1182 struct toepcb *toep = lookup_tid(sc, tid); 1183 struct inpcb *inp = toep->inp; 1184 struct tcpcb *tp = NULL; 1185 struct socket *so; 1186 #ifdef INVARIANTS 1187 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1188 #endif 1189 1190 KASSERT(opcode == CPL_PEER_CLOSE, 1191 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1192 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1193 1194 if (__predict_false(toep->flags & TPF_SYNQE)) { 1195 #ifdef INVARIANTS 1196 struct synq_entry *synqe = (void *)toep; 1197 1198 INP_WLOCK(synqe->lctx->inp); 1199 if (synqe->flags & TPF_SYNQE_HAS_L2TE) { 1200 KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, 1201 ("%s: listen socket closed but tid %u not aborted.", 1202 __func__, tid)); 1203 } else { 1204 /* 1205 * do_pass_accept_req is still running and will 1206 * eventually take care of this tid. 1207 */ 1208 } 1209 INP_WUNLOCK(synqe->lctx->inp); 1210 #endif 1211 CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, 1212 toep, toep->flags); 1213 return (0); 1214 } 1215 1216 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1217 1218 CURVNET_SET(toep->vnet); 1219 INP_INFO_RLOCK(&V_tcbinfo); 1220 INP_WLOCK(inp); 1221 tp = intotcpcb(inp); 1222 1223 CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__, 1224 tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp); 1225 1226 if (toep->flags & TPF_ABORT_SHUTDOWN) 1227 goto done; 1228 1229 tp->rcv_nxt++; /* FIN */ 1230 1231 so = inp->inp_socket; 1232 if (toep->ulp_mode == ULP_MODE_TCPDDP) { 1233 DDP_LOCK(toep); 1234 if (__predict_false(toep->ddp_flags & 1235 (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) 1236 handle_ddp_close(toep, tp, cpl->rcv_nxt); 1237 DDP_UNLOCK(toep); 1238 } 1239 socantrcvmore(so); 1240 1241 if (toep->ulp_mode != ULP_MODE_RDMA) { 1242 KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt), 1243 ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt, 1244 be32toh(cpl->rcv_nxt))); 1245 } 1246 1247 switch (tp->t_state) { 1248 case TCPS_SYN_RECEIVED: 1249 tp->t_starttime = ticks; 1250 /* FALLTHROUGH */ 1251 1252 case TCPS_ESTABLISHED: 1253 tp->t_state = TCPS_CLOSE_WAIT; 1254 break; 1255 1256 case TCPS_FIN_WAIT_1: 1257 tp->t_state = TCPS_CLOSING; 1258 break; 1259 1260 case TCPS_FIN_WAIT_2: 1261 tcp_twstart(tp); 1262 INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ 1263 INP_INFO_RUNLOCK(&V_tcbinfo); 1264 CURVNET_RESTORE(); 1265 1266 INP_WLOCK(inp); 1267 final_cpl_received(toep); 1268 return (0); 1269 1270 default: 1271 log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n", 1272 __func__, tid, tp->t_state); 1273 } 1274 done: 1275 INP_WUNLOCK(inp); 1276 INP_INFO_RUNLOCK(&V_tcbinfo); 1277 CURVNET_RESTORE(); 1278 return (0); 1279 } 1280 1281 /* 1282 * Peer has ACK'd our FIN. 
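 *
 * Depending on where the connection is in its shutdown, the handler below
 * moves it from CLOSING to TIME_WAIT (tcp_twstart), from LAST_ACK to closed
 * (tcp_close), or from FIN_WAIT_1 to FIN_WAIT_2.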
1283 */ 1284 static int 1285 do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss, 1286 struct mbuf *m) 1287 { 1288 struct adapter *sc = iq->adapter; 1289 const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1); 1290 unsigned int tid = GET_TID(cpl); 1291 struct toepcb *toep = lookup_tid(sc, tid); 1292 struct inpcb *inp = toep->inp; 1293 struct tcpcb *tp = NULL; 1294 struct socket *so = NULL; 1295 #ifdef INVARIANTS 1296 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1297 #endif 1298 1299 KASSERT(opcode == CPL_CLOSE_CON_RPL, 1300 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1301 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1302 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1303 1304 CURVNET_SET(toep->vnet); 1305 INP_INFO_RLOCK(&V_tcbinfo); 1306 INP_WLOCK(inp); 1307 tp = intotcpcb(inp); 1308 1309 CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x", 1310 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags); 1311 1312 if (toep->flags & TPF_ABORT_SHUTDOWN) 1313 goto done; 1314 1315 so = inp->inp_socket; 1316 tp->snd_una = be32toh(cpl->snd_nxt) - 1; /* exclude FIN */ 1317 1318 switch (tp->t_state) { 1319 case TCPS_CLOSING: /* see TCPS_FIN_WAIT_2 in do_peer_close too */ 1320 tcp_twstart(tp); 1321 release: 1322 INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ 1323 INP_INFO_RUNLOCK(&V_tcbinfo); 1324 CURVNET_RESTORE(); 1325 1326 INP_WLOCK(inp); 1327 final_cpl_received(toep); /* no more CPLs expected */ 1328 1329 return (0); 1330 case TCPS_LAST_ACK: 1331 if (tcp_close(tp)) 1332 INP_WUNLOCK(inp); 1333 goto release; 1334 1335 case TCPS_FIN_WAIT_1: 1336 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 1337 soisdisconnected(so); 1338 tp->t_state = TCPS_FIN_WAIT_2; 1339 break; 1340 1341 default: 1342 log(LOG_ERR, 1343 "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n", 1344 __func__, tid, tcpstates[tp->t_state]); 1345 } 1346 done: 1347 INP_WUNLOCK(inp); 1348 INP_INFO_RUNLOCK(&V_tcbinfo); 1349 CURVNET_RESTORE(); 1350 return (0); 1351 } 1352 1353 void 1354 send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid, 1355 int rst_status) 1356 { 1357 struct wrqe *wr; 1358 struct cpl_abort_rpl *cpl; 1359 1360 wr = alloc_wrqe(sizeof(*cpl), ofld_txq); 1361 if (wr == NULL) { 1362 /* XXX */ 1363 panic("%s: allocation failure.", __func__); 1364 } 1365 cpl = wrtod(wr); 1366 1367 INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid); 1368 cpl->cmd = rst_status; 1369 1370 t4_wrq_tx(sc, wr); 1371 } 1372 1373 static int 1374 abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason) 1375 { 1376 switch (abort_reason) { 1377 case CPL_ERR_BAD_SYN: 1378 case CPL_ERR_CONN_RESET: 1379 return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET); 1380 case CPL_ERR_XMIT_TIMEDOUT: 1381 case CPL_ERR_PERSIST_TIMEDOUT: 1382 case CPL_ERR_FINWAIT2_TIMEDOUT: 1383 case CPL_ERR_KEEPALIVE_TIMEDOUT: 1384 return (ETIMEDOUT); 1385 default: 1386 return (EIO); 1387 } 1388 } 1389 1390 /* 1391 * TCP RST from the peer, timeout, or some other such critical error. 
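 *
 * Unless the status is mere negative advice, the T4 is owed a CPL_ABORT_RPL
 * in response (sent via send_abort_rpl() at the end of the handler).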
1392 */ 1393 static int 1394 do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1395 { 1396 struct adapter *sc = iq->adapter; 1397 const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1); 1398 unsigned int tid = GET_TID(cpl); 1399 struct toepcb *toep = lookup_tid(sc, tid); 1400 struct sge_wrq *ofld_txq = toep->ofld_txq; 1401 struct inpcb *inp; 1402 struct tcpcb *tp; 1403 #ifdef INVARIANTS 1404 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1405 #endif 1406 1407 KASSERT(opcode == CPL_ABORT_REQ_RSS, 1408 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1409 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1410 1411 if (toep->flags & TPF_SYNQE) 1412 return (do_abort_req_synqe(iq, rss, m)); 1413 1414 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1415 1416 if (negative_advice(cpl->status)) { 1417 CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)", 1418 __func__, cpl->status, tid, toep->flags); 1419 return (0); /* Ignore negative advice */ 1420 } 1421 1422 inp = toep->inp; 1423 CURVNET_SET(toep->vnet); 1424 INP_INFO_RLOCK(&V_tcbinfo); /* for tcp_close */ 1425 INP_WLOCK(inp); 1426 1427 tp = intotcpcb(inp); 1428 1429 CTR6(KTR_CXGBE, 1430 "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d", 1431 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, 1432 inp->inp_flags, cpl->status); 1433 1434 /* 1435 * If we'd initiated an abort earlier the reply to it is responsible for 1436 * cleaning up resources. Otherwise we tear everything down right here 1437 * right now. We owe the T4 a CPL_ABORT_RPL no matter what. 1438 */ 1439 if (toep->flags & TPF_ABORT_SHUTDOWN) { 1440 INP_WUNLOCK(inp); 1441 goto done; 1442 } 1443 toep->flags |= TPF_ABORT_SHUTDOWN; 1444 1445 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 1446 struct socket *so = inp->inp_socket; 1447 1448 if (so != NULL) 1449 so_error_set(so, abort_status_to_errno(tp, 1450 cpl->status)); 1451 tp = tcp_close(tp); 1452 if (tp == NULL) 1453 INP_WLOCK(inp); /* re-acquire */ 1454 } 1455 1456 final_cpl_received(toep); 1457 done: 1458 INP_INFO_RUNLOCK(&V_tcbinfo); 1459 CURVNET_RESTORE(); 1460 send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST); 1461 return (0); 1462 } 1463 1464 /* 1465 * Reply to the CPL_ABORT_REQ (send_reset) 1466 */ 1467 static int 1468 do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1469 { 1470 struct adapter *sc = iq->adapter; 1471 const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1); 1472 unsigned int tid = GET_TID(cpl); 1473 struct toepcb *toep = lookup_tid(sc, tid); 1474 struct inpcb *inp = toep->inp; 1475 #ifdef INVARIANTS 1476 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1477 #endif 1478 1479 KASSERT(opcode == CPL_ABORT_RPL_RSS, 1480 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1481 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1482 1483 if (toep->flags & TPF_SYNQE) 1484 return (do_abort_rpl_synqe(iq, rss, m)); 1485 1486 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1487 1488 CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d", 1489 __func__, tid, toep, inp, cpl->status); 1490 1491 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, 1492 ("%s: wasn't expecting abort reply", __func__)); 1493 1494 INP_WLOCK(inp); 1495 final_cpl_received(toep); 1496 1497 return (0); 1498 } 1499 1500 static int 1501 do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1502 { 1503 struct adapter *sc = iq->adapter; 
1504 const struct cpl_rx_data *cpl = mtod(m, const void *); 1505 unsigned int tid = GET_TID(cpl); 1506 struct toepcb *toep = lookup_tid(sc, tid); 1507 struct inpcb *inp = toep->inp; 1508 struct tcpcb *tp; 1509 struct socket *so; 1510 struct sockbuf *sb; 1511 int len; 1512 uint32_t ddp_placed = 0; 1513 1514 if (__predict_false(toep->flags & TPF_SYNQE)) { 1515 #ifdef INVARIANTS 1516 struct synq_entry *synqe = (void *)toep; 1517 1518 INP_WLOCK(synqe->lctx->inp); 1519 if (synqe->flags & TPF_SYNQE_HAS_L2TE) { 1520 KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, 1521 ("%s: listen socket closed but tid %u not aborted.", 1522 __func__, tid)); 1523 } else { 1524 /* 1525 * do_pass_accept_req is still running and will 1526 * eventually take care of this tid. 1527 */ 1528 } 1529 INP_WUNLOCK(synqe->lctx->inp); 1530 #endif 1531 CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, 1532 toep, toep->flags); 1533 m_freem(m); 1534 return (0); 1535 } 1536 1537 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1538 1539 /* strip off CPL header */ 1540 m_adj(m, sizeof(*cpl)); 1541 len = m->m_pkthdr.len; 1542 1543 INP_WLOCK(inp); 1544 if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) { 1545 CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x", 1546 __func__, tid, len, inp->inp_flags); 1547 INP_WUNLOCK(inp); 1548 m_freem(m); 1549 return (0); 1550 } 1551 1552 tp = intotcpcb(inp); 1553 1554 if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq))) 1555 ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt; 1556 1557 tp->rcv_nxt += len; 1558 if (tp->rcv_wnd < len) { 1559 KASSERT(toep->ulp_mode == ULP_MODE_RDMA, 1560 ("%s: negative window size", __func__)); 1561 } 1562 1563 tp->rcv_wnd -= len; 1564 tp->t_rcvtime = ticks; 1565 1566 if (toep->ulp_mode == ULP_MODE_TCPDDP) 1567 DDP_LOCK(toep); 1568 so = inp_inpcbtosocket(inp); 1569 sb = &so->so_rcv; 1570 SOCKBUF_LOCK(sb); 1571 1572 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) { 1573 CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)", 1574 __func__, tid, len); 1575 m_freem(m); 1576 SOCKBUF_UNLOCK(sb); 1577 if (toep->ulp_mode == ULP_MODE_TCPDDP) 1578 DDP_UNLOCK(toep); 1579 INP_WUNLOCK(inp); 1580 1581 CURVNET_SET(toep->vnet); 1582 INP_INFO_RLOCK(&V_tcbinfo); 1583 INP_WLOCK(inp); 1584 tp = tcp_drop(tp, ECONNRESET); 1585 if (tp) 1586 INP_WUNLOCK(inp); 1587 INP_INFO_RUNLOCK(&V_tcbinfo); 1588 CURVNET_RESTORE(); 1589 1590 return (0); 1591 } 1592 1593 /* receive buffer autosize */ 1594 MPASS(toep->vnet == so->so_vnet); 1595 CURVNET_SET(toep->vnet); 1596 if (sb->sb_flags & SB_AUTOSIZE && 1597 V_tcp_do_autorcvbuf && 1598 sb->sb_hiwat < V_tcp_autorcvbuf_max && 1599 len > (sbspace(sb) / 8 * 7)) { 1600 unsigned int hiwat = sb->sb_hiwat; 1601 unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc, 1602 V_tcp_autorcvbuf_max); 1603 1604 if (!sbreserve_locked(sb, newsize, so, NULL)) 1605 sb->sb_flags &= ~SB_AUTOSIZE; 1606 else 1607 toep->rx_credits += newsize - hiwat; 1608 } 1609 1610 if (toep->ddp_waiting_count != 0 || toep->ddp_active_count != 0) 1611 CTR3(KTR_CXGBE, "%s: tid %u, non-ddp rx (%d bytes)", __func__, 1612 tid, len); 1613 1614 if (toep->ulp_mode == ULP_MODE_TCPDDP) { 1615 int changed = !(toep->ddp_flags & DDP_ON) ^ cpl->ddp_off; 1616 1617 if (changed) { 1618 if (toep->ddp_flags & DDP_SC_REQ) 1619 toep->ddp_flags ^= DDP_ON | DDP_SC_REQ; 1620 else { 1621 KASSERT(cpl->ddp_off == 1, 1622 ("%s: DDP switched on by itself.", 1623 __func__)); 1624 1625 /* Fell out of DDP mode */ 1626 toep->ddp_flags &= ~DDP_ON; 1627 CTR1(KTR_CXGBE, "%s: fell out of DDP mode", 
1628 __func__); 1629 1630 insert_ddp_data(toep, ddp_placed); 1631 } 1632 } 1633 1634 if (toep->ddp_flags & DDP_ON) { 1635 /* 1636 * CPL_RX_DATA with DDP on can only be an indicate. 1637 * Start posting queued AIO requests via DDP. The 1638 * payload that arrived in this indicate is appended 1639 * to the socket buffer as usual. 1640 */ 1641 handle_ddp_indicate(toep); 1642 } 1643 } 1644 1645 KASSERT(toep->sb_cc >= sbused(sb), 1646 ("%s: sb %p has more data (%d) than last time (%d).", 1647 __func__, sb, sbused(sb), toep->sb_cc)); 1648 toep->rx_credits += toep->sb_cc - sbused(sb); 1649 sbappendstream_locked(sb, m, 0); 1650 toep->sb_cc = sbused(sb); 1651 if (toep->rx_credits > 0 && toep->sb_cc + tp->rcv_wnd < sb->sb_lowat) { 1652 int credits; 1653 1654 credits = send_rx_credits(sc, toep, toep->rx_credits); 1655 toep->rx_credits -= credits; 1656 tp->rcv_wnd += credits; 1657 tp->rcv_adv += credits; 1658 } 1659 1660 if (toep->ddp_waiting_count > 0 && sbavail(sb) != 0) { 1661 CTR2(KTR_CXGBE, "%s: tid %u queueing AIO task", __func__, 1662 tid); 1663 ddp_queue_toep(toep); 1664 } 1665 sorwakeup_locked(so); 1666 SOCKBUF_UNLOCK_ASSERT(sb); 1667 if (toep->ulp_mode == ULP_MODE_TCPDDP) 1668 DDP_UNLOCK(toep); 1669 1670 INP_WUNLOCK(inp); 1671 CURVNET_RESTORE(); 1672 return (0); 1673 } 1674 1675 #define S_CPL_FW4_ACK_OPCODE 24 1676 #define M_CPL_FW4_ACK_OPCODE 0xff 1677 #define V_CPL_FW4_ACK_OPCODE(x) ((x) << S_CPL_FW4_ACK_OPCODE) 1678 #define G_CPL_FW4_ACK_OPCODE(x) \ 1679 (((x) >> S_CPL_FW4_ACK_OPCODE) & M_CPL_FW4_ACK_OPCODE) 1680 1681 #define S_CPL_FW4_ACK_FLOWID 0 1682 #define M_CPL_FW4_ACK_FLOWID 0xffffff 1683 #define V_CPL_FW4_ACK_FLOWID(x) ((x) << S_CPL_FW4_ACK_FLOWID) 1684 #define G_CPL_FW4_ACK_FLOWID(x) \ 1685 (((x) >> S_CPL_FW4_ACK_FLOWID) & M_CPL_FW4_ACK_FLOWID) 1686 1687 #define S_CPL_FW4_ACK_CR 24 1688 #define M_CPL_FW4_ACK_CR 0xff 1689 #define V_CPL_FW4_ACK_CR(x) ((x) << S_CPL_FW4_ACK_CR) 1690 #define G_CPL_FW4_ACK_CR(x) (((x) >> S_CPL_FW4_ACK_CR) & M_CPL_FW4_ACK_CR) 1691 1692 #define S_CPL_FW4_ACK_SEQVAL 0 1693 #define M_CPL_FW4_ACK_SEQVAL 0x1 1694 #define V_CPL_FW4_ACK_SEQVAL(x) ((x) << S_CPL_FW4_ACK_SEQVAL) 1695 #define G_CPL_FW4_ACK_SEQVAL(x) \ 1696 (((x) >> S_CPL_FW4_ACK_SEQVAL) & M_CPL_FW4_ACK_SEQVAL) 1697 #define F_CPL_FW4_ACK_SEQVAL V_CPL_FW4_ACK_SEQVAL(1U) 1698 1699 static int 1700 do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1701 { 1702 struct adapter *sc = iq->adapter; 1703 const struct cpl_fw4_ack *cpl = (const void *)(rss + 1); 1704 unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl))); 1705 struct toepcb *toep = lookup_tid(sc, tid); 1706 struct inpcb *inp; 1707 struct tcpcb *tp; 1708 struct socket *so; 1709 uint8_t credits = cpl->credits; 1710 struct ofld_tx_sdesc *txsd; 1711 int plen; 1712 #ifdef INVARIANTS 1713 unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl))); 1714 #endif 1715 1716 /* 1717 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and 1718 * now this comes back carrying the credits for the flowc. 
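	 *
	 * (There is nothing useful to do with those credits; the tid is
	 * already being torn down, so just return.)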
1719 */ 1720 if (__predict_false(toep->flags & TPF_SYNQE)) { 1721 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, 1722 ("%s: credits for a synq entry %p", __func__, toep)); 1723 return (0); 1724 } 1725 1726 inp = toep->inp; 1727 1728 KASSERT(opcode == CPL_FW4_ACK, 1729 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1730 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1731 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1732 1733 INP_WLOCK(inp); 1734 1735 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) { 1736 INP_WUNLOCK(inp); 1737 return (0); 1738 } 1739 1740 KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0, 1741 ("%s: inp_flags 0x%x", __func__, inp->inp_flags)); 1742 1743 tp = intotcpcb(inp); 1744 1745 if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) { 1746 tcp_seq snd_una = be32toh(cpl->snd_una); 1747 1748 #ifdef INVARIANTS 1749 if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) { 1750 log(LOG_ERR, 1751 "%s: unexpected seq# %x for TID %u, snd_una %x\n", 1752 __func__, snd_una, toep->tid, tp->snd_una); 1753 } 1754 #endif 1755 1756 if (tp->snd_una != snd_una) { 1757 tp->snd_una = snd_una; 1758 tp->ts_recent_age = tcp_ts_getticks(); 1759 } 1760 } 1761 1762 #ifdef VERBOSE_TRACES 1763 CTR3(KTR_CXGBE, "%s: tid %d credits %u", __func__, tid, credits); 1764 #endif 1765 so = inp->inp_socket; 1766 txsd = &toep->txsd[toep->txsd_cidx]; 1767 plen = 0; 1768 while (credits) { 1769 KASSERT(credits >= txsd->tx_credits, 1770 ("%s: too many (or partial) credits", __func__)); 1771 credits -= txsd->tx_credits; 1772 toep->tx_credits += txsd->tx_credits; 1773 plen += txsd->plen; 1774 txsd++; 1775 toep->txsd_avail++; 1776 KASSERT(toep->txsd_avail <= toep->txsd_total, 1777 ("%s: txsd avail > total", __func__)); 1778 if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) { 1779 txsd = &toep->txsd[0]; 1780 toep->txsd_cidx = 0; 1781 } 1782 } 1783 1784 if (toep->tx_credits == toep->tx_total) { 1785 toep->tx_nocompl = 0; 1786 toep->plen_nocompl = 0; 1787 } 1788 1789 if (toep->flags & TPF_TX_SUSPENDED && 1790 toep->tx_credits >= toep->tx_total / 4) { 1791 #ifdef VERBOSE_TRACES 1792 CTR2(KTR_CXGBE, "%s: tid %d calling t4_push_frames", __func__, 1793 tid); 1794 #endif 1795 toep->flags &= ~TPF_TX_SUSPENDED; 1796 CURVNET_SET(toep->vnet); 1797 if (toep->ulp_mode == ULP_MODE_ISCSI) 1798 t4_push_pdus(sc, toep, plen); 1799 else 1800 t4_push_frames(sc, toep, plen); 1801 CURVNET_RESTORE(); 1802 } else if (plen > 0) { 1803 struct sockbuf *sb = &so->so_snd; 1804 int sbu; 1805 1806 SOCKBUF_LOCK(sb); 1807 sbu = sbused(sb); 1808 if (toep->ulp_mode == ULP_MODE_ISCSI) { 1809 1810 if (__predict_false(sbu > 0)) { 1811 /* 1812 * The data trasmitted before the tid's ULP mode 1813 * changed to ISCSI is still in so_snd. 1814 * Incoming credits should account for so_snd 1815 * first. 
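					 *
					 * In other words, up to sbused(sb) of
					 * the acked bytes are charged against
					 * that older stream data; only the
					 * remainder is reclaimed from
					 * ulp_pdu_reclaimq below.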
1816 */ 1817 sbdrop_locked(sb, min(sbu, plen)); 1818 plen -= min(sbu, plen); 1819 } 1820 sowwakeup_locked(so); /* unlocks so_snd */ 1821 rqdrop_locked(&toep->ulp_pdu_reclaimq, plen); 1822 } else { 1823 #ifdef VERBOSE_TRACES 1824 CTR3(KTR_CXGBE, "%s: tid %d dropped %d bytes", __func__, 1825 tid, plen); 1826 #endif 1827 sbdrop_locked(sb, plen); 1828 if (!TAILQ_EMPTY(&toep->aiotx_jobq)) 1829 t4_aiotx_queue_toep(toep); 1830 sowwakeup_locked(so); /* unlocks so_snd */ 1831 } 1832 SOCKBUF_UNLOCK_ASSERT(sb); 1833 } 1834 1835 INP_WUNLOCK(inp); 1836 1837 return (0); 1838 } 1839 1840 int 1841 do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1842 { 1843 struct adapter *sc = iq->adapter; 1844 const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1); 1845 unsigned int tid = GET_TID(cpl); 1846 struct toepcb *toep; 1847 #ifdef INVARIANTS 1848 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1849 #endif 1850 1851 KASSERT(opcode == CPL_SET_TCB_RPL, 1852 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1853 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1854 MPASS(iq != &sc->sge.fwq); 1855 1856 toep = lookup_tid(sc, tid); 1857 if (toep->ulp_mode == ULP_MODE_TCPDDP) { 1858 handle_ddp_tcb_rpl(toep, cpl); 1859 return (0); 1860 } 1861 1862 /* 1863 * TOM and/or other ULPs don't request replies for CPL_SET_TCB or 1864 * CPL_SET_TCB_FIELD requests. This can easily change and when it does 1865 * the dispatch code will go here. 1866 */ 1867 #ifdef INVARIANTS 1868 panic("%s: Unexpected CPL_SET_TCB_RPL for tid %u on iq %p", __func__, 1869 tid, iq); 1870 #else 1871 log(LOG_ERR, "%s: Unexpected CPL_SET_TCB_RPL for tid %u on iq %p\n", 1872 __func__, tid, iq); 1873 #endif 1874 1875 return (0); 1876 } 1877 1878 void 1879 t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, int tid, 1880 uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie, int iqid) 1881 { 1882 struct wrqe *wr; 1883 struct cpl_set_tcb_field *req; 1884 1885 MPASS((cookie & ~M_COOKIE) == 0); 1886 MPASS((iqid & ~M_QUEUENO) == 0); 1887 1888 wr = alloc_wrqe(sizeof(*req), wrq); 1889 if (wr == NULL) { 1890 /* XXX */ 1891 panic("%s: allocation failure.", __func__); 1892 } 1893 req = wrtod(wr); 1894 1895 INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid); 1896 req->reply_ctrl = htobe16(V_QUEUENO(iqid)); 1897 if (reply == 0) 1898 req->reply_ctrl |= htobe16(F_NO_REPLY); 1899 req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie)); 1900 req->mask = htobe64(mask); 1901 req->val = htobe64(val); 1902 1903 t4_wrq_tx(sc, wr); 1904 } 1905 1906 void 1907 t4_init_cpl_io_handlers(void) 1908 { 1909 1910 t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close); 1911 t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl); 1912 t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req); 1913 t4_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl); 1914 t4_register_cpl_handler(CPL_RX_DATA, do_rx_data); 1915 t4_register_cpl_handler(CPL_FW4_ACK, do_fw4_ack); 1916 } 1917 1918 void 1919 t4_uninit_cpl_io_handlers(void) 1920 { 1921 1922 t4_register_cpl_handler(CPL_PEER_CLOSE, NULL); 1923 t4_register_cpl_handler(CPL_CLOSE_CON_RPL, NULL); 1924 t4_register_cpl_handler(CPL_ABORT_REQ_RSS, NULL); 1925 t4_register_cpl_handler(CPL_ABORT_RPL_RSS, NULL); 1926 t4_register_cpl_handler(CPL_RX_DATA, NULL); 1927 t4_register_cpl_handler(CPL_FW4_ACK, NULL); 1928 } 1929 1930 /* 1931 * Use the 'backend3' field in AIO jobs to store the amount of data 1932 * sent by the AIO job so far and the 'backend4' field to hold an 
/*
 * Use the 'backend3' field in AIO jobs to store the amount of data
 * sent by the AIO job so far and the 'backend4' field to hold an
 * error that should be reported when the job is completed.
 */
#define aio_sent backend3
#define aio_error backend4

#define jobtotid(job)							\
	(((struct toepcb *)(so_sototcpcb((job)->fd_file->f_data)->t_toe))->tid)

static void
free_aiotx_buffer(struct aiotx_buffer *ab)
{
	struct kaiocb *job;
	long status;
	int error;

	if (refcount_release(&ab->refcount) == 0)
		return;

	job = ab->job;
	error = job->aio_error;
	status = job->aio_sent;
	vm_page_unhold_pages(ab->ps.pages, ab->ps.npages);
	free(ab, M_CXGBE);
#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d completed %p len %ld, error %d", __func__,
	    jobtotid(job), job, status, error);
#endif
	if (error == ECANCELED && status != 0)
		error = 0;
	if (error == ECANCELED)
		aio_cancel(job);
	else if (error)
		aio_complete(job, -1, error);
	else
		aio_complete(job, status, 0);
}

static void
t4_aiotx_mbuf_free(struct mbuf *m)
{
	struct aiotx_buffer *ab = m->m_ext.ext_arg1;

#ifdef VERBOSE_TRACES
	CTR3(KTR_CXGBE, "%s: completed %d bytes for tid %d", __func__,
	    m->m_len, jobtotid(ab->job));
#endif
	free_aiotx_buffer(ab);
}

/*
 * Hold the buffer backing an AIO request and return an AIO transmit
 * buffer.
 */
static int
hold_aio(struct kaiocb *job)
{
	struct aiotx_buffer *ab;
	struct vmspace *vm;
	vm_map_t map;
	vm_offset_t start, end, pgoff;
	int n;

	MPASS(job->backend1 == NULL);

	/*
	 * The AIO subsystem will cancel and drain all requests before
	 * permitting a process to exit or exec, so p_vmspace should
	 * be stable here.
	 */
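	/*
	 * Illustrative numbers (4KB pages): a 10000 byte buffer that starts
	 * 100 bytes into a page has pgoff = 100 and spans n = 3 pages, all
	 * of which are held below.
	 */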
	vm = job->userproc->p_vmspace;
	map = &vm->vm_map;
	start = (uintptr_t)job->uaiocb.aio_buf;
	pgoff = start & PAGE_MASK;
	end = round_page(start + job->uaiocb.aio_nbytes);
	start = trunc_page(start);
	n = atop(end - start);

	ab = malloc(sizeof(*ab) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK |
	    M_ZERO);
	refcount_init(&ab->refcount, 1);
	ab->ps.pages = (vm_page_t *)(ab + 1);
	ab->ps.npages = vm_fault_quick_hold_pages(map, start, end - start,
	    VM_PROT_WRITE, ab->ps.pages, n);
	if (ab->ps.npages < 0) {
		free(ab, M_CXGBE);
		return (EFAULT);
	}

	KASSERT(ab->ps.npages == n,
	    ("hold_aio: page count mismatch: %d vs %d", ab->ps.npages, n));

	ab->ps.offset = pgoff;
	ab->ps.len = job->uaiocb.aio_nbytes;
	ab->job = job;
	job->backend1 = ab;
#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d",
	    __func__, jobtotid(job), &ab->ps, job, ab->ps.npages);
#endif
	return (0);
}

static void
t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job)
{
	struct adapter *sc;
	struct sockbuf *sb;
	struct file *fp;
	struct aiotx_buffer *ab;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct mbuf *m;
	int error;
	bool moretocome, sendmore;

	sc = td_adapter(toep->td);
	sb = &so->so_snd;
	SOCKBUF_UNLOCK(sb);
	fp = job->fd_file;
	ab = job->backend1;
	m = NULL;

#ifdef MAC
	error = mac_socket_check_send(fp->f_cred, so);
	if (error != 0)
		goto out;
#endif

	if (ab == NULL) {
		error = hold_aio(job);
		if (error != 0)
			goto out;
		ab = job->backend1;
	}

	/* Inline sosend_generic(). */

	job->msgsnd = 1;

	error = sblock(sb, SBL_WAIT);
	MPASS(error == 0);

sendanother:
	m = m_get(M_WAITOK, MT_DATA);

	SOCKBUF_LOCK(sb);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		if ((so->so_options & SO_NOSIGPIPE) == 0) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		error = ENOTCONN;
		goto out;
	}
	if (sbspace(sb) < sb->sb_lowat) {
		MPASS(job->aio_sent == 0 || !(so->so_state & SS_NBIO));

		/*
		 * Don't block if there is too little room in the socket
		 * buffer.  Instead, requeue the request.
		 */
		if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
			SOCKBUF_UNLOCK(sb);
			sbunlock(sb);
			error = ECANCELED;
			goto out;
		}
		TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		goto out;
	}

	/*
	 * Write as much data as the socket permits, but no more than a
	 * single sndbuf at a time.
	 */
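	/*
	 * For example (illustrative numbers): with 32KB free in so_snd,
	 * 100KB of the AIO buffer still to send, and tt.sndbuf at 16KB,
	 * m_len is clamped to 16KB, sendmore is set so another mbuf is
	 * built immediately after this one is handed to tcp_output, and
	 * moretocome remains true because AIO data is still pending.
	 */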
	m->m_len = sbspace(sb);
	if (m->m_len > ab->ps.len - job->aio_sent) {
		m->m_len = ab->ps.len - job->aio_sent;
		moretocome = false;
	} else
		moretocome = true;
	if (m->m_len > sc->tt.sndbuf) {
		m->m_len = sc->tt.sndbuf;
		sendmore = true;
	} else
		sendmore = false;

	if (!TAILQ_EMPTY(&toep->aiotx_jobq))
		moretocome = true;
	SOCKBUF_UNLOCK(sb);
	MPASS(m->m_len != 0);

	/* Inlined tcp_usr_send(). */

	inp = toep->inp;
	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		sbunlock(sb);
		error = ECONNRESET;
		goto out;
	}

	refcount_acquire(&ab->refcount);
	m_extadd(m, NULL, ab->ps.len, t4_aiotx_mbuf_free, ab,
	    (void *)(uintptr_t)job->aio_sent, 0, EXT_NET_DRV);
	m->m_ext.ext_flags |= EXT_FLAG_AIOTX;
	job->aio_sent += m->m_len;

	sbappendstream(sb, m, 0);
	m = NULL;

	if (!(inp->inp_flags & INP_DROPPED)) {
		tp = intotcpcb(inp);
		if (moretocome)
			tp->t_flags |= TF_MORETOCOME;
		error = tp->t_fb->tfb_tcp_output(tp);
		if (moretocome)
			tp->t_flags &= ~TF_MORETOCOME;
	}

	INP_WUNLOCK(inp);
	if (sendmore)
		goto sendanother;
	sbunlock(sb);

	if (error)
		goto out;

	/*
	 * If this is a blocking socket and the request has not been
	 * fully completed, requeue it until the socket is ready
	 * again.
	 */
	if (job->aio_sent < job->uaiocb.aio_nbytes &&
	    !(so->so_state & SS_NBIO)) {
		SOCKBUF_LOCK(sb);
		if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
			SOCKBUF_UNLOCK(sb);
			error = ECANCELED;
			goto out;
		}
		TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
		return;
	}

	/*
	 * If the request will not be requeued, drop a reference on
	 * the aiotx buffer.  Any mbufs in flight should still
	 * contain a reference, but this drops the reference that the
	 * job owns while it is waiting to queue mbufs to the socket.
	 */
	free_aiotx_buffer(ab);

out:
	if (error) {
		if (ab != NULL) {
			job->aio_error = error;
			free_aiotx_buffer(ab);
		} else {
			MPASS(job->aio_sent == 0);
			aio_complete(job, -1, error);
		}
	}
	if (m != NULL)
		m_free(m);
	SOCKBUF_LOCK(sb);
}

static void
t4_aiotx_task(void *context, int pending)
{
	struct toepcb *toep = context;
	struct inpcb *inp = toep->inp;
	struct socket *so = inp->inp_socket;
	struct kaiocb *job;

	CURVNET_SET(toep->vnet);
	SOCKBUF_LOCK(&so->so_snd);
	while (!TAILQ_EMPTY(&toep->aiotx_jobq) && sowriteable(so)) {
		job = TAILQ_FIRST(&toep->aiotx_jobq);
		TAILQ_REMOVE(&toep->aiotx_jobq, job, list);
		if (!aio_clear_cancel_function(job))
			continue;

		t4_aiotx_process_job(toep, so, job);
	}
	toep->aiotx_task_active = false;
	SOCKBUF_UNLOCK(&so->so_snd);
	CURVNET_RESTORE();

	free_toepcb(toep);
}

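/*
 * Schedule the AIO tx task for this connection.  Only one instance of the
 * task is kept in flight at a time: aiotx_task_active is set here with
 * so_snd locked and is cleared by t4_aiotx_task() once it has drained the
 * job queue.  The toepcb reference taken by hold_toepcb() below is released
 * by the free_toepcb() at the end of t4_aiotx_task().
 */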
"true" : "false"); 2253 #endif 2254 if (toep->aiotx_task_active) 2255 return; 2256 toep->aiotx_task_active = true; 2257 hold_toepcb(toep); 2258 soaio_enqueue(&toep->aiotx_task); 2259 } 2260 2261 static void 2262 t4_aiotx_cancel(struct kaiocb *job) 2263 { 2264 struct aiotx_buffer *ab; 2265 struct socket *so; 2266 struct sockbuf *sb; 2267 struct tcpcb *tp; 2268 struct toepcb *toep; 2269 2270 so = job->fd_file->f_data; 2271 tp = so_sototcpcb(so); 2272 toep = tp->t_toe; 2273 MPASS(job->uaiocb.aio_lio_opcode == LIO_WRITE); 2274 sb = &so->so_snd; 2275 2276 SOCKBUF_LOCK(sb); 2277 if (!aio_cancel_cleared(job)) 2278 TAILQ_REMOVE(&toep->aiotx_jobq, job, list); 2279 SOCKBUF_UNLOCK(sb); 2280 2281 ab = job->backend1; 2282 if (ab != NULL) 2283 free_aiotx_buffer(ab); 2284 else 2285 aio_cancel(job); 2286 } 2287 2288 int 2289 t4_aio_queue_aiotx(struct socket *so, struct kaiocb *job) 2290 { 2291 struct tcpcb *tp = so_sototcpcb(so); 2292 struct toepcb *toep = tp->t_toe; 2293 struct adapter *sc = td_adapter(toep->td); 2294 2295 /* This only handles writes. */ 2296 if (job->uaiocb.aio_lio_opcode != LIO_WRITE) 2297 return (EOPNOTSUPP); 2298 2299 if (!sc->tt.tx_zcopy) 2300 return (EOPNOTSUPP); 2301 2302 SOCKBUF_LOCK(&so->so_snd); 2303 #ifdef VERBOSE_TRACES 2304 CTR2(KTR_CXGBE, "%s: queueing %p", __func__, job); 2305 #endif 2306 if (!aio_set_cancel_function(job, t4_aiotx_cancel)) 2307 panic("new job was cancelled"); 2308 TAILQ_INSERT_TAIL(&toep->aiotx_jobq, job, list); 2309 if (sowriteable(so)) 2310 t4_aiotx_queue_toep(toep); 2311 SOCKBUF_UNLOCK(&so->so_snd); 2312 return (0); 2313 } 2314 2315 void 2316 aiotx_init_toep(struct toepcb *toep) 2317 { 2318 2319 TAILQ_INIT(&toep->aiotx_jobq); 2320 TASK_INIT(&toep->aiotx_task, 0, t4_aiotx_task, toep); 2321 } 2322 #endif 2323