1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2012, 2015 Chelsio Communications, Inc. 5 * All rights reserved. 6 * Written by: Navdeep Parhar <np@FreeBSD.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_inet.h" 34 #include "opt_inet6.h" 35 #include "opt_ratelimit.h" 36 37 #ifdef TCP_OFFLOAD 38 #include <sys/param.h> 39 #include <sys/aio.h> 40 #include <sys/file.h> 41 #include <sys/kernel.h> 42 #include <sys/ktr.h> 43 #include <sys/module.h> 44 #include <sys/proc.h> 45 #include <sys/protosw.h> 46 #include <sys/domain.h> 47 #include <sys/socket.h> 48 #include <sys/socketvar.h> 49 #include <sys/sglist.h> 50 #include <sys/taskqueue.h> 51 #include <netinet/in.h> 52 #include <netinet/in_pcb.h> 53 #include <netinet/ip.h> 54 #include <netinet/ip6.h> 55 #define TCPSTATES 56 #include <netinet/tcp_fsm.h> 57 #include <netinet/tcp_seq.h> 58 #include <netinet/tcp_var.h> 59 #include <netinet/toecore.h> 60 61 #include <security/mac/mac_framework.h> 62 63 #include <vm/vm.h> 64 #include <vm/vm_extern.h> 65 #include <vm/pmap.h> 66 #include <vm/vm_map.h> 67 #include <vm/vm_page.h> 68 69 #include "common/common.h" 70 #include "common/t4_msg.h" 71 #include "common/t4_regs.h" 72 #include "common/t4_tcb.h" 73 #include "tom/t4_tom_l2t.h" 74 #include "tom/t4_tom.h" 75 76 static void t4_aiotx_cancel(struct kaiocb *job); 77 static void t4_aiotx_queue_toep(struct toepcb *toep); 78 79 static size_t 80 aiotx_mbuf_pgoff(struct mbuf *m) 81 { 82 struct aiotx_buffer *ab; 83 84 MPASS(IS_AIOTX_MBUF(m)); 85 ab = m->m_ext.ext_arg1; 86 return ((ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) % PAGE_SIZE); 87 } 88 89 static vm_page_t * 90 aiotx_mbuf_pages(struct mbuf *m) 91 { 92 struct aiotx_buffer *ab; 93 int npages; 94 95 MPASS(IS_AIOTX_MBUF(m)); 96 ab = m->m_ext.ext_arg1; 97 npages = (ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) / PAGE_SIZE; 98 return (ab->ps.pages + npages); 99 } 100 101 void 102 send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp) 103 { 104 struct wrqe *wr; 105 struct fw_flowc_wr *flowc; 106 unsigned int nparams, flowclen, paramidx; 107 struct vi_info *vi = toep->vi; 108 struct port_info *pi = vi->pi; 109 struct adapter *sc = pi->adapter; 
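	/*
	 * Descriptive note (added): the FLOWC work request carries this tid's
	 * per-flow parameters (channel, port, ingress queue, MSS, initial
	 * sequence numbers, etc.) to the firmware and must precede any other
	 * work request on the tid; the other tx paths assert
	 * TPF_FLOWC_WR_SENT before sending anything.
	 */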
110 unsigned int pfvf = sc->pf << S_FW_VIID_PFN; 111 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 112 113 KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT), 114 ("%s: flowc for tid %u sent already", __func__, toep->tid)); 115 116 if (ftxp != NULL) 117 nparams = 8; 118 else 119 nparams = 6; 120 if (toep->ulp_mode == ULP_MODE_TLS) 121 nparams++; 122 if (toep->tls.fcplenmax != 0) 123 nparams++; 124 if (toep->tc_idx != -1) { 125 MPASS(toep->tc_idx >= 0 && 126 toep->tc_idx < sc->chip_params->nsched_cls); 127 nparams++; 128 } 129 130 flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval); 131 132 wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq); 133 if (wr == NULL) { 134 /* XXX */ 135 panic("%s: allocation failure.", __func__); 136 } 137 flowc = wrtod(wr); 138 memset(flowc, 0, wr->wr_len); 139 140 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 141 V_FW_FLOWC_WR_NPARAMS(nparams)); 142 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) | 143 V_FW_WR_FLOWID(toep->tid)); 144 145 #define FLOWC_PARAM(__m, __v) \ 146 do { \ 147 flowc->mnemval[paramidx].mnemonic = FW_FLOWC_MNEM_##__m; \ 148 flowc->mnemval[paramidx].val = htobe32(__v); \ 149 paramidx++; \ 150 } while (0) 151 152 paramidx = 0; 153 154 FLOWC_PARAM(PFNVFN, pfvf); 155 FLOWC_PARAM(CH, pi->tx_chan); 156 FLOWC_PARAM(PORT, pi->tx_chan); 157 FLOWC_PARAM(IQID, toep->ofld_rxq->iq.abs_id); 158 if (ftxp) { 159 uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf); 160 161 FLOWC_PARAM(SNDNXT, ftxp->snd_nxt); 162 FLOWC_PARAM(RCVNXT, ftxp->rcv_nxt); 163 FLOWC_PARAM(SNDBUF, sndbuf); 164 FLOWC_PARAM(MSS, ftxp->mss); 165 166 CTR6(KTR_CXGBE, 167 "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x", 168 __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt, 169 ftxp->rcv_nxt); 170 } else { 171 FLOWC_PARAM(SNDBUF, 512); 172 FLOWC_PARAM(MSS, 512); 173 174 CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid); 175 } 176 if (toep->ulp_mode == ULP_MODE_TLS) 177 FLOWC_PARAM(ULP_MODE, toep->ulp_mode); 178 if (toep->tls.fcplenmax != 0) 179 FLOWC_PARAM(TXDATAPLEN_MAX, toep->tls.fcplenmax); 180 if (toep->tc_idx != -1) 181 FLOWC_PARAM(SCHEDCLASS, toep->tc_idx); 182 #undef FLOWC_PARAM 183 184 KASSERT(paramidx == nparams, ("nparams mismatch")); 185 186 txsd->tx_credits = howmany(flowclen, 16); 187 txsd->plen = 0; 188 KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0, 189 ("%s: not enough credits (%d)", __func__, toep->tx_credits)); 190 toep->tx_credits -= txsd->tx_credits; 191 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) 192 toep->txsd_pidx = 0; 193 toep->txsd_avail--; 194 195 toep->flags |= TPF_FLOWC_WR_SENT; 196 t4_wrq_tx(sc, wr); 197 } 198 199 #ifdef RATELIMIT 200 /* 201 * Input is Bytes/second (so_max_pacing_rate), chip counts in Kilobits/second. 
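 * For example, a so_max_pacing_rate of 1250000 Bytes/s is programmed as
 * 1250000 * 8 / 1000 = 10000 Kbps.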
202 */ 203 static int 204 update_tx_rate_limit(struct adapter *sc, struct toepcb *toep, u_int Bps) 205 { 206 int tc_idx, rc; 207 const u_int kbps = (u_int) (uint64_t)Bps * 8ULL / 1000; 208 const int port_id = toep->vi->pi->port_id; 209 210 CTR3(KTR_CXGBE, "%s: tid %u, rate %uKbps", __func__, toep->tid, kbps); 211 212 if (kbps == 0) { 213 /* unbind */ 214 tc_idx = -1; 215 } else { 216 rc = t4_reserve_cl_rl_kbps(sc, port_id, kbps, &tc_idx); 217 if (rc != 0) 218 return (rc); 219 MPASS(tc_idx >= 0 && tc_idx < sc->chip_params->nsched_cls); 220 } 221 222 if (toep->tc_idx != tc_idx) { 223 struct wrqe *wr; 224 struct fw_flowc_wr *flowc; 225 int nparams = 1, flowclen, flowclen16; 226 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 227 228 flowclen = sizeof(*flowc) + nparams * sizeof(struct 229 fw_flowc_mnemval); 230 flowclen16 = howmany(flowclen, 16); 231 if (toep->tx_credits < flowclen16 || toep->txsd_avail == 0 || 232 (wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq)) == NULL) { 233 if (tc_idx >= 0) 234 t4_release_cl_rl(sc, port_id, tc_idx); 235 return (ENOMEM); 236 } 237 238 flowc = wrtod(wr); 239 memset(flowc, 0, wr->wr_len); 240 241 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 242 V_FW_FLOWC_WR_NPARAMS(nparams)); 243 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(flowclen16) | 244 V_FW_WR_FLOWID(toep->tid)); 245 246 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; 247 if (tc_idx == -1) 248 flowc->mnemval[0].val = htobe32(0xff); 249 else 250 flowc->mnemval[0].val = htobe32(tc_idx); 251 252 txsd->tx_credits = flowclen16; 253 txsd->plen = 0; 254 toep->tx_credits -= txsd->tx_credits; 255 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) 256 toep->txsd_pidx = 0; 257 toep->txsd_avail--; 258 t4_wrq_tx(sc, wr); 259 } 260 261 if (toep->tc_idx >= 0) 262 t4_release_cl_rl(sc, port_id, toep->tc_idx); 263 toep->tc_idx = tc_idx; 264 265 return (0); 266 } 267 #endif 268 269 void 270 send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt) 271 { 272 struct wrqe *wr; 273 struct cpl_abort_req *req; 274 int tid = toep->tid; 275 struct inpcb *inp = toep->inp; 276 struct tcpcb *tp = intotcpcb(inp); /* don't use if INP_DROPPED */ 277 278 INP_WLOCK_ASSERT(inp); 279 280 CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s", 281 __func__, toep->tid, 282 inp->inp_flags & INP_DROPPED ? "inp dropped" : 283 tcpstates[tp->t_state], 284 toep->flags, inp->inp_flags, 285 toep->flags & TPF_ABORT_SHUTDOWN ? 286 " (abort already in progress)" : ""); 287 288 if (toep->flags & TPF_ABORT_SHUTDOWN) 289 return; /* abort already in progress */ 290 291 toep->flags |= TPF_ABORT_SHUTDOWN; 292 293 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 294 ("%s: flowc_wr not sent for tid %d.", __func__, tid)); 295 296 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 297 if (wr == NULL) { 298 /* XXX */ 299 panic("%s: allocation failure.", __func__); 300 } 301 req = wrtod(wr); 302 303 INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid); 304 if (inp->inp_flags & INP_DROPPED) 305 req->rsvd0 = htobe32(snd_nxt); 306 else 307 req->rsvd0 = htobe32(tp->snd_nxt); 308 req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT); 309 req->cmd = CPL_ABORT_SEND_RST; 310 311 /* 312 * XXX: What's the correct way to tell that the inp hasn't been detached 313 * from its socket? Should I even be flushing the snd buffer here? 314 */ 315 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 316 struct socket *so = inp->inp_socket; 317 318 if (so != NULL) /* because I'm not sure. 
See comment above */ 319 sbflush(&so->so_snd); 320 } 321 322 t4_l2t_send(sc, wr, toep->l2te); 323 } 324 325 /* 326 * Called when a connection is established to translate the TCP options 327 * reported by HW to FreeBSD's native format. 328 */ 329 static void 330 assign_rxopt(struct tcpcb *tp, unsigned int opt) 331 { 332 struct toepcb *toep = tp->t_toe; 333 struct inpcb *inp = tp->t_inpcb; 334 struct adapter *sc = td_adapter(toep->td); 335 int n; 336 337 INP_LOCK_ASSERT(inp); 338 339 if (inp->inp_inc.inc_flags & INC_ISIPV6) 340 n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 341 else 342 n = sizeof(struct ip) + sizeof(struct tcphdr); 343 tp->t_maxseg = sc->params.mtus[G_TCPOPT_MSS(opt)] - n; 344 345 if (G_TCPOPT_TSTAMP(opt)) { 346 tp->t_flags |= TF_RCVD_TSTMP; /* timestamps ok */ 347 tp->ts_recent = 0; /* hmmm */ 348 tp->ts_recent_age = tcp_ts_getticks(); 349 tp->t_maxseg -= TCPOLEN_TSTAMP_APPA; 350 } 351 352 CTR5(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u), mss %u", __func__, 353 toep->tid, G_TCPOPT_MSS(opt), sc->params.mtus[G_TCPOPT_MSS(opt)], 354 tp->t_maxseg); 355 356 if (G_TCPOPT_SACK(opt)) 357 tp->t_flags |= TF_SACK_PERMIT; /* should already be set */ 358 else 359 tp->t_flags &= ~TF_SACK_PERMIT; /* sack disallowed by peer */ 360 361 if (G_TCPOPT_WSCALE_OK(opt)) 362 tp->t_flags |= TF_RCVD_SCALE; 363 364 /* Doing window scaling? */ 365 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 366 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 367 tp->rcv_scale = tp->request_r_scale; 368 tp->snd_scale = G_TCPOPT_SND_WSCALE(opt); 369 } 370 } 371 372 /* 373 * Completes some final bits of initialization for just established connections 374 * and changes their state to TCPS_ESTABLISHED. 375 * 376 * The ISNs are from the exchange of SYNs. 377 */ 378 void 379 make_established(struct toepcb *toep, uint32_t iss, uint32_t irs, uint16_t opt) 380 { 381 struct inpcb *inp = toep->inp; 382 struct socket *so = inp->inp_socket; 383 struct tcpcb *tp = intotcpcb(inp); 384 long bufsize; 385 uint16_t tcpopt = be16toh(opt); 386 struct flowc_tx_params ftxp; 387 388 INP_WLOCK_ASSERT(inp); 389 KASSERT(tp->t_state == TCPS_SYN_SENT || 390 tp->t_state == TCPS_SYN_RECEIVED, 391 ("%s: TCP state %s", __func__, tcpstates[tp->t_state])); 392 393 CTR6(KTR_CXGBE, "%s: tid %d, so %p, inp %p, tp %p, toep %p", 394 __func__, toep->tid, so, inp, tp, toep); 395 396 tcp_state_change(tp, TCPS_ESTABLISHED); 397 tp->t_starttime = ticks; 398 TCPSTAT_INC(tcps_connects); 399 400 tp->irs = irs; 401 tcp_rcvseqinit(tp); 402 tp->rcv_wnd = toep->opt0_rcv_bufsize << 10; 403 tp->rcv_adv += tp->rcv_wnd; 404 tp->last_ack_sent = tp->rcv_nxt; 405 406 tp->iss = iss; 407 tcp_sendseqinit(tp); 408 tp->snd_una = iss + 1; 409 tp->snd_nxt = iss + 1; 410 tp->snd_max = iss + 1; 411 412 assign_rxopt(tp, tcpopt); 413 414 SOCKBUF_LOCK(&so->so_snd); 415 if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf) 416 bufsize = V_tcp_autosndbuf_max; 417 else 418 bufsize = sbspace(&so->so_snd); 419 SOCKBUF_UNLOCK(&so->so_snd); 420 421 ftxp.snd_nxt = tp->snd_nxt; 422 ftxp.rcv_nxt = tp->rcv_nxt; 423 ftxp.snd_space = bufsize; 424 ftxp.mss = tp->t_maxseg; 425 send_flowc_wr(toep, &ftxp); 426 427 soisconnected(so); 428 } 429 430 int 431 send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits) 432 { 433 struct wrqe *wr; 434 struct cpl_rx_data_ack *req; 435 uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1); 436 437 KASSERT(credits >= 0, ("%s: %d credits", __func__, credits)); 438 439 wr = alloc_wrqe(sizeof(*req), toep->ctrlq); 440 if (wr == NULL) 441 return (0); 442 
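	/*
	 * The CPL_RX_DATA_ACK returns 'credits' bytes of receive window to
	 * the hardware; callers grow rcv_wnd/rcv_adv by the value returned
	 * here.
	 */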
req = wrtod(wr); 443 444 INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid); 445 req->credit_dack = htobe32(dack | V_RX_CREDITS(credits)); 446 447 t4_wrq_tx(sc, wr); 448 return (credits); 449 } 450 451 void 452 send_rx_modulate(struct adapter *sc, struct toepcb *toep) 453 { 454 struct wrqe *wr; 455 struct cpl_rx_data_ack *req; 456 457 wr = alloc_wrqe(sizeof(*req), toep->ctrlq); 458 if (wr == NULL) 459 return; 460 req = wrtod(wr); 461 462 INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid); 463 req->credit_dack = htobe32(F_RX_MODULATE_RX); 464 465 t4_wrq_tx(sc, wr); 466 } 467 468 void 469 t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp) 470 { 471 struct adapter *sc = tod->tod_softc; 472 struct inpcb *inp = tp->t_inpcb; 473 struct socket *so = inp->inp_socket; 474 struct sockbuf *sb = &so->so_rcv; 475 struct toepcb *toep = tp->t_toe; 476 int rx_credits; 477 478 INP_WLOCK_ASSERT(inp); 479 SOCKBUF_LOCK_ASSERT(sb); 480 481 rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0; 482 if (toep->ulp_mode == ULP_MODE_TLS) { 483 if (toep->tls.rcv_over >= rx_credits) { 484 toep->tls.rcv_over -= rx_credits; 485 rx_credits = 0; 486 } else { 487 rx_credits -= toep->tls.rcv_over; 488 toep->tls.rcv_over = 0; 489 } 490 } 491 492 if (rx_credits > 0 && 493 (tp->rcv_wnd <= 32 * 1024 || rx_credits >= 64 * 1024 || 494 (rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) || 495 sbused(sb) + tp->rcv_wnd < sb->sb_lowat)) { 496 rx_credits = send_rx_credits(sc, toep, rx_credits); 497 tp->rcv_wnd += rx_credits; 498 tp->rcv_adv += rx_credits; 499 } else if (toep->flags & TPF_FORCE_CREDITS) 500 send_rx_modulate(sc, toep); 501 } 502 503 void 504 t4_rcvd(struct toedev *tod, struct tcpcb *tp) 505 { 506 struct inpcb *inp = tp->t_inpcb; 507 struct socket *so = inp->inp_socket; 508 struct sockbuf *sb = &so->so_rcv; 509 510 SOCKBUF_LOCK(sb); 511 t4_rcvd_locked(tod, tp); 512 SOCKBUF_UNLOCK(sb); 513 } 514 515 /* 516 * Close a connection by sending a CPL_CLOSE_CON_REQ message. 517 */ 518 int 519 t4_close_conn(struct adapter *sc, struct toepcb *toep) 520 { 521 struct wrqe *wr; 522 struct cpl_close_con_req *req; 523 unsigned int tid = toep->tid; 524 525 CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid, 526 toep->flags & TPF_FIN_SENT ? ", IGNORED" : ""); 527 528 if (toep->flags & TPF_FIN_SENT) 529 return (0); 530 531 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 532 ("%s: flowc_wr not sent for tid %u.", __func__, tid)); 533 534 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 535 if (wr == NULL) { 536 /* XXX */ 537 panic("%s: allocation failure.", __func__); 538 } 539 req = wrtod(wr); 540 541 req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) | 542 V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr))); 543 req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) | 544 V_FW_WR_FLOWID(tid)); 545 req->wr.wr_lo = cpu_to_be64(0); 546 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); 547 req->rsvd = 0; 548 549 toep->flags |= TPF_FIN_SENT; 550 toep->flags &= ~TPF_SEND_FIN; 551 t4_l2t_send(sc, wr, toep->l2te); 552 553 return (0); 554 } 555 556 #define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16) 557 #define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16)) 558 559 /* Maximum amount of immediate data we could stuff in a WR */ 560 static inline int 561 max_imm_payload(int tx_credits) 562 { 563 const int n = 2; /* Use only up to 2 desc for imm. 
data WR */ 564 565 KASSERT(tx_credits >= 0 && 566 tx_credits <= MAX_OFLD_TX_CREDITS, 567 ("%s: %d credits", __func__, tx_credits)); 568 569 if (tx_credits < MIN_OFLD_TX_CREDITS) 570 return (0); 571 572 if (tx_credits >= (n * EQ_ESIZE) / 16) 573 return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr)); 574 else 575 return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr)); 576 } 577 578 /* Maximum number of SGL entries we could stuff in a WR */ 579 static inline int 580 max_dsgl_nsegs(int tx_credits) 581 { 582 int nseg = 1; /* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */ 583 int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS; 584 585 KASSERT(tx_credits >= 0 && 586 tx_credits <= MAX_OFLD_TX_CREDITS, 587 ("%s: %d credits", __func__, tx_credits)); 588 589 if (tx_credits < MIN_OFLD_TX_CREDITS) 590 return (0); 591 592 nseg += 2 * (sge_pair_credits * 16 / 24); 593 if ((sge_pair_credits * 16) % 24 == 16) 594 nseg++; 595 596 return (nseg); 597 } 598 599 static inline void 600 write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen, 601 unsigned int plen, uint8_t credits, int shove, int ulp_submode, int txalign) 602 { 603 struct fw_ofld_tx_data_wr *txwr = dst; 604 605 txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) | 606 V_FW_WR_IMMDLEN(immdlen)); 607 txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) | 608 V_FW_WR_LEN16(credits)); 609 txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(toep->ulp_mode) | 610 V_TX_ULP_SUBMODE(ulp_submode) | V_TX_URG(0) | V_TX_SHOVE(shove)); 611 txwr->plen = htobe32(plen); 612 613 if (txalign > 0) { 614 struct tcpcb *tp = intotcpcb(toep->inp); 615 616 if (plen < 2 * tp->t_maxseg) 617 txwr->lsodisable_to_flags |= 618 htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE); 619 else 620 txwr->lsodisable_to_flags |= 621 htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD | 622 (tp->t_flags & TF_NODELAY ? 0 : 623 F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE)); 624 } 625 } 626 627 /* 628 * Generate a DSGL from a starting mbuf. The total number of segments and the 629 * maximum segments in any one mbuf are provided. 630 */ 631 static void 632 write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n) 633 { 634 struct mbuf *m; 635 struct ulptx_sgl *usgl = dst; 636 int i, j, rc; 637 struct sglist sg; 638 struct sglist_seg segs[n]; 639 640 KASSERT(nsegs > 0, ("%s: nsegs 0", __func__)); 641 642 sglist_init(&sg, n, segs); 643 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 644 V_ULPTX_NSGE(nsegs)); 645 646 i = -1; 647 for (m = start; m != stop; m = m->m_next) { 648 if (IS_AIOTX_MBUF(m)) 649 rc = sglist_append_vmpages(&sg, aiotx_mbuf_pages(m), 650 aiotx_mbuf_pgoff(m), m->m_len); 651 else 652 rc = sglist_append(&sg, mtod(m, void *), m->m_len); 653 if (__predict_false(rc != 0)) 654 panic("%s: sglist_append %d", __func__, rc); 655 656 for (j = 0; j < sg.sg_nseg; i++, j++) { 657 if (i < 0) { 658 usgl->len0 = htobe32(segs[j].ss_len); 659 usgl->addr0 = htobe64(segs[j].ss_paddr); 660 } else { 661 usgl->sge[i / 2].len[i & 1] = 662 htobe32(segs[j].ss_len); 663 usgl->sge[i / 2].addr[i & 1] = 664 htobe64(segs[j].ss_paddr); 665 } 666 #ifdef INVARIANTS 667 nsegs--; 668 #endif 669 } 670 sglist_reset(&sg); 671 } 672 if (i & 1) 673 usgl->sge[i / 2].len[1] = htobe32(0); 674 KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p", 675 __func__, nsegs, start, stop)); 676 } 677 678 /* 679 * Max number of SGL entries an offload tx work request can have. This is 41 680 * (1 + 40) for a full 512B work request. 
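 * For example, max_dsgl_nsegs(32) for that full 512B WR (32 credits), with
 * MIN_OFLD_TX_CREDITS (2, covering the 16B WR header plus the leading
 * ulptx_sgl) subtracted, works out to 1 + 2 * (30 * 16 / 24) = 41: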
 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40)
 */
#define OFLD_SGL_LEN (41)

/*
 * Send data and/or a FIN to the peer.
 *
 * The socket's so_snd buffer consists of a stream of data starting with sb_mb
 * and linked together with m_next. sb_sndptr, if set, is the last mbuf that
 * was transmitted.
 *
 * drop indicates the number of bytes that should be dropped from the head of
 * the send buffer. It is an optimization that lets do_fw4_ack avoid creating
 * contention on the send buffer lock (before this change it used to do
 * sowwakeup and then t4_push_frames right after that when recovering from tx
 * stalls). When drop is set this function MUST drop the bytes and wake up any
 * writers.
 */
void
t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
{
        struct mbuf *sndptr, *m, *sb_sndptr;
        struct fw_ofld_tx_data_wr *txwr;
        struct wrqe *wr;
        u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
        struct inpcb *inp = toep->inp;
        struct tcpcb *tp = intotcpcb(inp);
        struct socket *so = inp->inp_socket;
        struct sockbuf *sb = &so->so_snd;
        int tx_credits, shove, compl, sowwakeup;
        struct ofld_tx_sdesc *txsd;
        bool aiotx_mbuf_seen;

        INP_WLOCK_ASSERT(inp);
        KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
            ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

        KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
            toep->ulp_mode == ULP_MODE_TCPDDP ||
            toep->ulp_mode == ULP_MODE_TLS ||
            toep->ulp_mode == ULP_MODE_RDMA,
            ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

#ifdef VERBOSE_TRACES
        CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
            __func__, toep->tid, toep->flags, tp->t_flags, drop);
#endif
        if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
                return;

#ifdef RATELIMIT
        if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) &&
            (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) {
                inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED;
        }
#endif

        /*
         * This function doesn't resume by itself. Someone else must clear the
         * flag and call this function.
         */
        if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
                KASSERT(drop == 0,
                    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
                return;
        }

        txsd = &toep->txsd[toep->txsd_pidx];
        do {
                tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
                max_imm = max_imm_payload(tx_credits);
                max_nsegs = max_dsgl_nsegs(tx_credits);

                SOCKBUF_LOCK(sb);
                sowwakeup = drop;
                if (drop) {
                        sbdrop_locked(sb, drop);
                        drop = 0;
                }
                sb_sndptr = sb->sb_sndptr;
                sndptr = sb_sndptr ?
sb_sndptr->m_next : sb->sb_mb; 762 plen = 0; 763 nsegs = 0; 764 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ 765 aiotx_mbuf_seen = false; 766 for (m = sndptr; m != NULL; m = m->m_next) { 767 int n; 768 769 if (IS_AIOTX_MBUF(m)) 770 n = sglist_count_vmpages(aiotx_mbuf_pages(m), 771 aiotx_mbuf_pgoff(m), m->m_len); 772 else 773 n = sglist_count(mtod(m, void *), m->m_len); 774 775 nsegs += n; 776 plen += m->m_len; 777 778 /* This mbuf sent us _over_ the nsegs limit, back out */ 779 if (plen > max_imm && nsegs > max_nsegs) { 780 nsegs -= n; 781 plen -= m->m_len; 782 if (plen == 0) { 783 /* Too few credits */ 784 toep->flags |= TPF_TX_SUSPENDED; 785 if (sowwakeup) { 786 if (!TAILQ_EMPTY( 787 &toep->aiotx_jobq)) 788 t4_aiotx_queue_toep( 789 toep); 790 sowwakeup_locked(so); 791 } else 792 SOCKBUF_UNLOCK(sb); 793 SOCKBUF_UNLOCK_ASSERT(sb); 794 return; 795 } 796 break; 797 } 798 799 if (IS_AIOTX_MBUF(m)) 800 aiotx_mbuf_seen = true; 801 if (max_nsegs_1mbuf < n) 802 max_nsegs_1mbuf = n; 803 sb_sndptr = m; /* new sb->sb_sndptr if all goes well */ 804 805 /* This mbuf put us right at the max_nsegs limit */ 806 if (plen > max_imm && nsegs == max_nsegs) { 807 m = m->m_next; 808 break; 809 } 810 } 811 812 if (sbused(sb) > sb->sb_hiwat * 5 / 8 && 813 toep->plen_nocompl + plen >= sb->sb_hiwat / 4) 814 compl = 1; 815 else 816 compl = 0; 817 818 if (sb->sb_flags & SB_AUTOSIZE && 819 V_tcp_do_autosndbuf && 820 sb->sb_hiwat < V_tcp_autosndbuf_max && 821 sbused(sb) >= sb->sb_hiwat * 7 / 8) { 822 int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc, 823 V_tcp_autosndbuf_max); 824 825 if (!sbreserve_locked(sb, newsize, so, NULL)) 826 sb->sb_flags &= ~SB_AUTOSIZE; 827 else 828 sowwakeup = 1; /* room available */ 829 } 830 if (sowwakeup) { 831 if (!TAILQ_EMPTY(&toep->aiotx_jobq)) 832 t4_aiotx_queue_toep(toep); 833 sowwakeup_locked(so); 834 } else 835 SOCKBUF_UNLOCK(sb); 836 SOCKBUF_UNLOCK_ASSERT(sb); 837 838 /* nothing to send */ 839 if (plen == 0) { 840 KASSERT(m == NULL, 841 ("%s: nothing to send, but m != NULL", __func__)); 842 break; 843 } 844 845 if (__predict_false(toep->flags & TPF_FIN_SENT)) 846 panic("%s: excess tx.", __func__); 847 848 shove = m == NULL && !(tp->t_flags & TF_MORETOCOME); 849 if (plen <= max_imm && !aiotx_mbuf_seen) { 850 851 /* Immediate data tx */ 852 853 wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), 854 toep->ofld_txq); 855 if (wr == NULL) { 856 /* XXX: how will we recover from this? */ 857 toep->flags |= TPF_TX_SUSPENDED; 858 return; 859 } 860 txwr = wrtod(wr); 861 credits = howmany(wr->wr_len, 16); 862 write_tx_wr(txwr, toep, plen, plen, credits, shove, 0, 863 sc->tt.tx_align); 864 m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); 865 nsegs = 0; 866 } else { 867 int wr_len; 868 869 /* DSGL tx */ 870 871 wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + 872 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 873 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq); 874 if (wr == NULL) { 875 /* XXX: how will we recover from this? 
*/ 876 toep->flags |= TPF_TX_SUSPENDED; 877 return; 878 } 879 txwr = wrtod(wr); 880 credits = howmany(wr_len, 16); 881 write_tx_wr(txwr, toep, 0, plen, credits, shove, 0, 882 sc->tt.tx_align); 883 write_tx_sgl(txwr + 1, sndptr, m, nsegs, 884 max_nsegs_1mbuf); 885 if (wr_len & 0xf) { 886 uint64_t *pad = (uint64_t *) 887 ((uintptr_t)txwr + wr_len); 888 *pad = 0; 889 } 890 } 891 892 KASSERT(toep->tx_credits >= credits, 893 ("%s: not enough credits", __func__)); 894 895 toep->tx_credits -= credits; 896 toep->tx_nocompl += credits; 897 toep->plen_nocompl += plen; 898 if (toep->tx_credits <= toep->tx_total * 3 / 8 && 899 toep->tx_nocompl >= toep->tx_total / 4) 900 compl = 1; 901 902 if (compl || toep->ulp_mode == ULP_MODE_RDMA) { 903 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); 904 toep->tx_nocompl = 0; 905 toep->plen_nocompl = 0; 906 } 907 908 tp->snd_nxt += plen; 909 tp->snd_max += plen; 910 911 SOCKBUF_LOCK(sb); 912 KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__)); 913 sb->sb_sndptr = sb_sndptr; 914 SOCKBUF_UNLOCK(sb); 915 916 toep->flags |= TPF_TX_DATA_SENT; 917 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) 918 toep->flags |= TPF_TX_SUSPENDED; 919 920 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 921 txsd->plen = plen; 922 txsd->tx_credits = credits; 923 txsd++; 924 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 925 toep->txsd_pidx = 0; 926 txsd = &toep->txsd[0]; 927 } 928 toep->txsd_avail--; 929 930 t4_l2t_send(sc, wr, toep->l2te); 931 } while (m != NULL); 932 933 /* Send a FIN if requested, but only if there's no more data to send */ 934 if (m == NULL && toep->flags & TPF_SEND_FIN) 935 t4_close_conn(sc, toep); 936 } 937 938 static inline void 939 rqdrop_locked(struct mbufq *q, int plen) 940 { 941 struct mbuf *m; 942 943 while (plen > 0) { 944 m = mbufq_dequeue(q); 945 946 /* Too many credits. */ 947 MPASS(m != NULL); 948 M_ASSERTPKTHDR(m); 949 950 /* Partial credits. */ 951 MPASS(plen >= m->m_pkthdr.len); 952 953 plen -= m->m_pkthdr.len; 954 m_freem(m); 955 } 956 } 957 958 void 959 t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop) 960 { 961 struct mbuf *sndptr, *m; 962 struct fw_ofld_tx_data_wr *txwr; 963 struct wrqe *wr; 964 u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf; 965 u_int adjusted_plen, ulp_submode; 966 struct inpcb *inp = toep->inp; 967 struct tcpcb *tp = intotcpcb(inp); 968 int tx_credits, shove; 969 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 970 struct mbufq *pduq = &toep->ulp_pduq; 971 static const u_int ulp_extra_len[] = {0, 4, 4, 8}; 972 973 INP_WLOCK_ASSERT(inp); 974 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 975 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); 976 KASSERT(toep->ulp_mode == ULP_MODE_ISCSI, 977 ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep)); 978 979 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) 980 return; 981 982 /* 983 * This function doesn't resume by itself. Someone else must clear the 984 * flag and call this function. 
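         * (In practice do_fw4_ack clears TPF_TX_SUSPENDED once enough tx
         * credits have been returned and then calls the appropriate push
         * routine again.)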
985 */ 986 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { 987 KASSERT(drop == 0, 988 ("%s: drop (%d) != 0 but tx is suspended", __func__, drop)); 989 return; 990 } 991 992 if (drop) 993 rqdrop_locked(&toep->ulp_pdu_reclaimq, drop); 994 995 while ((sndptr = mbufq_first(pduq)) != NULL) { 996 M_ASSERTPKTHDR(sndptr); 997 998 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); 999 max_imm = max_imm_payload(tx_credits); 1000 max_nsegs = max_dsgl_nsegs(tx_credits); 1001 1002 plen = 0; 1003 nsegs = 0; 1004 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ 1005 for (m = sndptr; m != NULL; m = m->m_next) { 1006 int n = sglist_count(mtod(m, void *), m->m_len); 1007 1008 nsegs += n; 1009 plen += m->m_len; 1010 1011 /* 1012 * This mbuf would send us _over_ the nsegs limit. 1013 * Suspend tx because the PDU can't be sent out. 1014 */ 1015 if (plen > max_imm && nsegs > max_nsegs) { 1016 toep->flags |= TPF_TX_SUSPENDED; 1017 return; 1018 } 1019 1020 if (max_nsegs_1mbuf < n) 1021 max_nsegs_1mbuf = n; 1022 } 1023 1024 if (__predict_false(toep->flags & TPF_FIN_SENT)) 1025 panic("%s: excess tx.", __func__); 1026 1027 /* 1028 * We have a PDU to send. All of it goes out in one WR so 'm' 1029 * is NULL. A PDU's length is always a multiple of 4. 1030 */ 1031 MPASS(m == NULL); 1032 MPASS((plen & 3) == 0); 1033 MPASS(sndptr->m_pkthdr.len == plen); 1034 1035 shove = !(tp->t_flags & TF_MORETOCOME); 1036 ulp_submode = mbuf_ulp_submode(sndptr); 1037 MPASS(ulp_submode < nitems(ulp_extra_len)); 1038 1039 /* 1040 * plen doesn't include header and data digests, which are 1041 * generated and inserted in the right places by the TOE, but 1042 * they do occupy TCP sequence space and need to be accounted 1043 * for. 1044 */ 1045 adjusted_plen = plen + ulp_extra_len[ulp_submode]; 1046 if (plen <= max_imm) { 1047 1048 /* Immediate data tx */ 1049 1050 wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), 1051 toep->ofld_txq); 1052 if (wr == NULL) { 1053 /* XXX: how will we recover from this? */ 1054 toep->flags |= TPF_TX_SUSPENDED; 1055 return; 1056 } 1057 txwr = wrtod(wr); 1058 credits = howmany(wr->wr_len, 16); 1059 write_tx_wr(txwr, toep, plen, adjusted_plen, credits, 1060 shove, ulp_submode, sc->tt.tx_align); 1061 m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); 1062 nsegs = 0; 1063 } else { 1064 int wr_len; 1065 1066 /* DSGL tx */ 1067 wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + 1068 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 1069 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq); 1070 if (wr == NULL) { 1071 /* XXX: how will we recover from this? 
*/ 1072 toep->flags |= TPF_TX_SUSPENDED; 1073 return; 1074 } 1075 txwr = wrtod(wr); 1076 credits = howmany(wr_len, 16); 1077 write_tx_wr(txwr, toep, 0, adjusted_plen, credits, 1078 shove, ulp_submode, sc->tt.tx_align); 1079 write_tx_sgl(txwr + 1, sndptr, m, nsegs, 1080 max_nsegs_1mbuf); 1081 if (wr_len & 0xf) { 1082 uint64_t *pad = (uint64_t *) 1083 ((uintptr_t)txwr + wr_len); 1084 *pad = 0; 1085 } 1086 } 1087 1088 KASSERT(toep->tx_credits >= credits, 1089 ("%s: not enough credits", __func__)); 1090 1091 m = mbufq_dequeue(pduq); 1092 MPASS(m == sndptr); 1093 mbufq_enqueue(&toep->ulp_pdu_reclaimq, m); 1094 1095 toep->tx_credits -= credits; 1096 toep->tx_nocompl += credits; 1097 toep->plen_nocompl += plen; 1098 if (toep->tx_credits <= toep->tx_total * 3 / 8 && 1099 toep->tx_nocompl >= toep->tx_total / 4) { 1100 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); 1101 toep->tx_nocompl = 0; 1102 toep->plen_nocompl = 0; 1103 } 1104 1105 tp->snd_nxt += adjusted_plen; 1106 tp->snd_max += adjusted_plen; 1107 1108 toep->flags |= TPF_TX_DATA_SENT; 1109 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) 1110 toep->flags |= TPF_TX_SUSPENDED; 1111 1112 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 1113 txsd->plen = plen; 1114 txsd->tx_credits = credits; 1115 txsd++; 1116 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 1117 toep->txsd_pidx = 0; 1118 txsd = &toep->txsd[0]; 1119 } 1120 toep->txsd_avail--; 1121 1122 t4_l2t_send(sc, wr, toep->l2te); 1123 } 1124 1125 /* Send a FIN if requested, but only if there are no more PDUs to send */ 1126 if (mbufq_first(pduq) == NULL && toep->flags & TPF_SEND_FIN) 1127 t4_close_conn(sc, toep); 1128 } 1129 1130 int 1131 t4_tod_output(struct toedev *tod, struct tcpcb *tp) 1132 { 1133 struct adapter *sc = tod->tod_softc; 1134 #ifdef INVARIANTS 1135 struct inpcb *inp = tp->t_inpcb; 1136 #endif 1137 struct toepcb *toep = tp->t_toe; 1138 1139 INP_WLOCK_ASSERT(inp); 1140 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1141 ("%s: inp %p dropped.", __func__, inp)); 1142 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1143 1144 if (toep->ulp_mode == ULP_MODE_ISCSI) 1145 t4_push_pdus(sc, toep, 0); 1146 else if (tls_tx_key(toep)) 1147 t4_push_tls_records(sc, toep, 0); 1148 else 1149 t4_push_frames(sc, toep, 0); 1150 1151 return (0); 1152 } 1153 1154 int 1155 t4_send_fin(struct toedev *tod, struct tcpcb *tp) 1156 { 1157 struct adapter *sc = tod->tod_softc; 1158 #ifdef INVARIANTS 1159 struct inpcb *inp = tp->t_inpcb; 1160 #endif 1161 struct toepcb *toep = tp->t_toe; 1162 1163 INP_WLOCK_ASSERT(inp); 1164 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1165 ("%s: inp %p dropped.", __func__, inp)); 1166 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1167 1168 toep->flags |= TPF_SEND_FIN; 1169 if (tp->t_state >= TCPS_ESTABLISHED) { 1170 if (toep->ulp_mode == ULP_MODE_ISCSI) 1171 t4_push_pdus(sc, toep, 0); 1172 else if (tls_tx_key(toep)) 1173 t4_push_tls_records(sc, toep, 0); 1174 else 1175 t4_push_frames(sc, toep, 0); 1176 } 1177 1178 return (0); 1179 } 1180 1181 int 1182 t4_send_rst(struct toedev *tod, struct tcpcb *tp) 1183 { 1184 struct adapter *sc = tod->tod_softc; 1185 #if defined(INVARIANTS) 1186 struct inpcb *inp = tp->t_inpcb; 1187 #endif 1188 struct toepcb *toep = tp->t_toe; 1189 1190 INP_WLOCK_ASSERT(inp); 1191 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1192 ("%s: inp %p dropped.", __func__, inp)); 1193 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1194 1195 /* hmmmm */ 1196 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 1197 ("%s: flowc for tid %u 
[%s] not sent already", 1198 __func__, toep->tid, tcpstates[tp->t_state])); 1199 1200 send_reset(sc, toep, 0); 1201 return (0); 1202 } 1203 1204 /* 1205 * Peer has sent us a FIN. 1206 */ 1207 static int 1208 do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1209 { 1210 struct adapter *sc = iq->adapter; 1211 const struct cpl_peer_close *cpl = (const void *)(rss + 1); 1212 unsigned int tid = GET_TID(cpl); 1213 struct toepcb *toep = lookup_tid(sc, tid); 1214 struct inpcb *inp = toep->inp; 1215 struct tcpcb *tp = NULL; 1216 struct socket *so; 1217 struct epoch_tracker et; 1218 #ifdef INVARIANTS 1219 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1220 #endif 1221 1222 KASSERT(opcode == CPL_PEER_CLOSE, 1223 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1224 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1225 1226 if (__predict_false(toep->flags & TPF_SYNQE)) { 1227 /* 1228 * do_pass_establish must have run before do_peer_close and if 1229 * this is still a synqe instead of a toepcb then the connection 1230 * must be getting aborted. 1231 */ 1232 MPASS(toep->flags & TPF_ABORT_SHUTDOWN); 1233 CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, 1234 toep, toep->flags); 1235 return (0); 1236 } 1237 1238 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1239 1240 CURVNET_SET(toep->vnet); 1241 INP_INFO_RLOCK_ET(&V_tcbinfo, et); 1242 INP_WLOCK(inp); 1243 tp = intotcpcb(inp); 1244 1245 CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__, 1246 tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp); 1247 1248 if (toep->flags & TPF_ABORT_SHUTDOWN) 1249 goto done; 1250 1251 tp->rcv_nxt++; /* FIN */ 1252 1253 so = inp->inp_socket; 1254 if (toep->ulp_mode == ULP_MODE_TCPDDP) { 1255 DDP_LOCK(toep); 1256 if (__predict_false(toep->ddp.flags & 1257 (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) 1258 handle_ddp_close(toep, tp, cpl->rcv_nxt); 1259 DDP_UNLOCK(toep); 1260 } 1261 socantrcvmore(so); 1262 1263 if (toep->ulp_mode != ULP_MODE_RDMA) { 1264 KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt), 1265 ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt, 1266 be32toh(cpl->rcv_nxt))); 1267 } 1268 1269 switch (tp->t_state) { 1270 case TCPS_SYN_RECEIVED: 1271 tp->t_starttime = ticks; 1272 /* FALLTHROUGH */ 1273 1274 case TCPS_ESTABLISHED: 1275 tcp_state_change(tp, TCPS_CLOSE_WAIT); 1276 break; 1277 1278 case TCPS_FIN_WAIT_1: 1279 tcp_state_change(tp, TCPS_CLOSING); 1280 break; 1281 1282 case TCPS_FIN_WAIT_2: 1283 tcp_twstart(tp); 1284 INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ 1285 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 1286 CURVNET_RESTORE(); 1287 1288 INP_WLOCK(inp); 1289 final_cpl_received(toep); 1290 return (0); 1291 1292 default: 1293 log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n", 1294 __func__, tid, tp->t_state); 1295 } 1296 done: 1297 INP_WUNLOCK(inp); 1298 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 1299 CURVNET_RESTORE(); 1300 return (0); 1301 } 1302 1303 /* 1304 * Peer has ACK'd our FIN. 
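 * This is the CPL_CLOSE_CON_RPL for the CPL_CLOSE_CON_REQ sent by
 * t4_close_conn(); the reply's snd_nxt covers our FIN, so snd_una is set to
 * snd_nxt - 1 below.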
1305 */ 1306 static int 1307 do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss, 1308 struct mbuf *m) 1309 { 1310 struct adapter *sc = iq->adapter; 1311 const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1); 1312 unsigned int tid = GET_TID(cpl); 1313 struct toepcb *toep = lookup_tid(sc, tid); 1314 struct inpcb *inp = toep->inp; 1315 struct tcpcb *tp = NULL; 1316 struct socket *so = NULL; 1317 struct epoch_tracker et; 1318 #ifdef INVARIANTS 1319 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1320 #endif 1321 1322 KASSERT(opcode == CPL_CLOSE_CON_RPL, 1323 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1324 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1325 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1326 1327 CURVNET_SET(toep->vnet); 1328 INP_INFO_RLOCK_ET(&V_tcbinfo, et); 1329 INP_WLOCK(inp); 1330 tp = intotcpcb(inp); 1331 1332 CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x", 1333 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags); 1334 1335 if (toep->flags & TPF_ABORT_SHUTDOWN) 1336 goto done; 1337 1338 so = inp->inp_socket; 1339 tp->snd_una = be32toh(cpl->snd_nxt) - 1; /* exclude FIN */ 1340 1341 switch (tp->t_state) { 1342 case TCPS_CLOSING: /* see TCPS_FIN_WAIT_2 in do_peer_close too */ 1343 tcp_twstart(tp); 1344 release: 1345 INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ 1346 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 1347 CURVNET_RESTORE(); 1348 1349 INP_WLOCK(inp); 1350 final_cpl_received(toep); /* no more CPLs expected */ 1351 1352 return (0); 1353 case TCPS_LAST_ACK: 1354 if (tcp_close(tp)) 1355 INP_WUNLOCK(inp); 1356 goto release; 1357 1358 case TCPS_FIN_WAIT_1: 1359 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 1360 soisdisconnected(so); 1361 tcp_state_change(tp, TCPS_FIN_WAIT_2); 1362 break; 1363 1364 default: 1365 log(LOG_ERR, 1366 "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n", 1367 __func__, tid, tcpstates[tp->t_state]); 1368 } 1369 done: 1370 INP_WUNLOCK(inp); 1371 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 1372 CURVNET_RESTORE(); 1373 return (0); 1374 } 1375 1376 void 1377 send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid, 1378 int rst_status) 1379 { 1380 struct wrqe *wr; 1381 struct cpl_abort_rpl *cpl; 1382 1383 wr = alloc_wrqe(sizeof(*cpl), ofld_txq); 1384 if (wr == NULL) { 1385 /* XXX */ 1386 panic("%s: allocation failure.", __func__); 1387 } 1388 cpl = wrtod(wr); 1389 1390 INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid); 1391 cpl->cmd = rst_status; 1392 1393 t4_wrq_tx(sc, wr); 1394 } 1395 1396 static int 1397 abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason) 1398 { 1399 switch (abort_reason) { 1400 case CPL_ERR_BAD_SYN: 1401 case CPL_ERR_CONN_RESET: 1402 return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET); 1403 case CPL_ERR_XMIT_TIMEDOUT: 1404 case CPL_ERR_PERSIST_TIMEDOUT: 1405 case CPL_ERR_FINWAIT2_TIMEDOUT: 1406 case CPL_ERR_KEEPALIVE_TIMEDOUT: 1407 return (ETIMEDOUT); 1408 default: 1409 return (EIO); 1410 } 1411 } 1412 1413 /* 1414 * TCP RST from the peer, timeout, or some other such critical error. 
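 * These are delivered as CPL_ABORT_REQ_RSS; abort_status_to_errno() above
 * maps the hardware status code to the errno reported on the socket.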
1415 */ 1416 static int 1417 do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1418 { 1419 struct adapter *sc = iq->adapter; 1420 const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1); 1421 unsigned int tid = GET_TID(cpl); 1422 struct toepcb *toep = lookup_tid(sc, tid); 1423 struct sge_wrq *ofld_txq = toep->ofld_txq; 1424 struct inpcb *inp; 1425 struct tcpcb *tp; 1426 struct epoch_tracker et; 1427 #ifdef INVARIANTS 1428 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1429 #endif 1430 1431 KASSERT(opcode == CPL_ABORT_REQ_RSS, 1432 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1433 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1434 1435 if (toep->flags & TPF_SYNQE) 1436 return (do_abort_req_synqe(iq, rss, m)); 1437 1438 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1439 1440 if (negative_advice(cpl->status)) { 1441 CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)", 1442 __func__, cpl->status, tid, toep->flags); 1443 return (0); /* Ignore negative advice */ 1444 } 1445 1446 inp = toep->inp; 1447 CURVNET_SET(toep->vnet); 1448 INP_INFO_RLOCK_ET(&V_tcbinfo, et); /* for tcp_close */ 1449 INP_WLOCK(inp); 1450 1451 tp = intotcpcb(inp); 1452 1453 CTR6(KTR_CXGBE, 1454 "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d", 1455 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, 1456 inp->inp_flags, cpl->status); 1457 1458 /* 1459 * If we'd initiated an abort earlier the reply to it is responsible for 1460 * cleaning up resources. Otherwise we tear everything down right here 1461 * right now. We owe the T4 a CPL_ABORT_RPL no matter what. 1462 */ 1463 if (toep->flags & TPF_ABORT_SHUTDOWN) { 1464 INP_WUNLOCK(inp); 1465 goto done; 1466 } 1467 toep->flags |= TPF_ABORT_SHUTDOWN; 1468 1469 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 1470 struct socket *so = inp->inp_socket; 1471 1472 if (so != NULL) 1473 so_error_set(so, abort_status_to_errno(tp, 1474 cpl->status)); 1475 tp = tcp_close(tp); 1476 if (tp == NULL) 1477 INP_WLOCK(inp); /* re-acquire */ 1478 } 1479 1480 final_cpl_received(toep); 1481 done: 1482 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 1483 CURVNET_RESTORE(); 1484 send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST); 1485 return (0); 1486 } 1487 1488 /* 1489 * Reply to the CPL_ABORT_REQ (send_reset) 1490 */ 1491 static int 1492 do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1493 { 1494 struct adapter *sc = iq->adapter; 1495 const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1); 1496 unsigned int tid = GET_TID(cpl); 1497 struct toepcb *toep = lookup_tid(sc, tid); 1498 struct inpcb *inp = toep->inp; 1499 #ifdef INVARIANTS 1500 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1501 #endif 1502 1503 KASSERT(opcode == CPL_ABORT_RPL_RSS, 1504 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1505 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1506 1507 if (toep->flags & TPF_SYNQE) 1508 return (do_abort_rpl_synqe(iq, rss, m)); 1509 1510 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1511 1512 CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d", 1513 __func__, tid, toep, inp, cpl->status); 1514 1515 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, 1516 ("%s: wasn't expecting abort reply", __func__)); 1517 1518 INP_WLOCK(inp); 1519 final_cpl_received(toep); 1520 1521 return (0); 1522 } 1523 1524 static int 1525 do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1526 
{ 1527 struct adapter *sc = iq->adapter; 1528 const struct cpl_rx_data *cpl = mtod(m, const void *); 1529 unsigned int tid = GET_TID(cpl); 1530 struct toepcb *toep = lookup_tid(sc, tid); 1531 struct inpcb *inp = toep->inp; 1532 struct tcpcb *tp; 1533 struct socket *so; 1534 struct sockbuf *sb; 1535 struct epoch_tracker et; 1536 int len, rx_credits; 1537 uint32_t ddp_placed = 0; 1538 1539 if (__predict_false(toep->flags & TPF_SYNQE)) { 1540 /* 1541 * do_pass_establish must have run before do_rx_data and if this 1542 * is still a synqe instead of a toepcb then the connection must 1543 * be getting aborted. 1544 */ 1545 MPASS(toep->flags & TPF_ABORT_SHUTDOWN); 1546 CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, 1547 toep, toep->flags); 1548 m_freem(m); 1549 return (0); 1550 } 1551 1552 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1553 1554 /* strip off CPL header */ 1555 m_adj(m, sizeof(*cpl)); 1556 len = m->m_pkthdr.len; 1557 1558 INP_WLOCK(inp); 1559 if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) { 1560 CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x", 1561 __func__, tid, len, inp->inp_flags); 1562 INP_WUNLOCK(inp); 1563 m_freem(m); 1564 return (0); 1565 } 1566 1567 tp = intotcpcb(inp); 1568 1569 if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq))) 1570 ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt; 1571 1572 tp->rcv_nxt += len; 1573 if (tp->rcv_wnd < len) { 1574 KASSERT(toep->ulp_mode == ULP_MODE_RDMA, 1575 ("%s: negative window size", __func__)); 1576 } 1577 1578 tp->rcv_wnd -= len; 1579 tp->t_rcvtime = ticks; 1580 1581 if (toep->ulp_mode == ULP_MODE_TCPDDP) 1582 DDP_LOCK(toep); 1583 so = inp_inpcbtosocket(inp); 1584 sb = &so->so_rcv; 1585 SOCKBUF_LOCK(sb); 1586 1587 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) { 1588 CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)", 1589 __func__, tid, len); 1590 m_freem(m); 1591 SOCKBUF_UNLOCK(sb); 1592 if (toep->ulp_mode == ULP_MODE_TCPDDP) 1593 DDP_UNLOCK(toep); 1594 INP_WUNLOCK(inp); 1595 1596 CURVNET_SET(toep->vnet); 1597 INP_INFO_RLOCK_ET(&V_tcbinfo, et); 1598 INP_WLOCK(inp); 1599 tp = tcp_drop(tp, ECONNRESET); 1600 if (tp) 1601 INP_WUNLOCK(inp); 1602 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 1603 CURVNET_RESTORE(); 1604 1605 return (0); 1606 } 1607 1608 /* receive buffer autosize */ 1609 MPASS(toep->vnet == so->so_vnet); 1610 CURVNET_SET(toep->vnet); 1611 if (sb->sb_flags & SB_AUTOSIZE && 1612 V_tcp_do_autorcvbuf && 1613 sb->sb_hiwat < V_tcp_autorcvbuf_max && 1614 len > (sbspace(sb) / 8 * 7)) { 1615 unsigned int hiwat = sb->sb_hiwat; 1616 unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc, 1617 V_tcp_autorcvbuf_max); 1618 1619 if (!sbreserve_locked(sb, newsize, so, NULL)) 1620 sb->sb_flags &= ~SB_AUTOSIZE; 1621 } 1622 1623 if (toep->ulp_mode == ULP_MODE_TCPDDP) { 1624 int changed = !(toep->ddp.flags & DDP_ON) ^ cpl->ddp_off; 1625 1626 if (toep->ddp.waiting_count != 0 || toep->ddp.active_count != 0) 1627 CTR3(KTR_CXGBE, "%s: tid %u, non-ddp rx (%d bytes)", 1628 __func__, tid, len); 1629 1630 if (changed) { 1631 if (toep->ddp.flags & DDP_SC_REQ) 1632 toep->ddp.flags ^= DDP_ON | DDP_SC_REQ; 1633 else { 1634 KASSERT(cpl->ddp_off == 1, 1635 ("%s: DDP switched on by itself.", 1636 __func__)); 1637 1638 /* Fell out of DDP mode */ 1639 toep->ddp.flags &= ~DDP_ON; 1640 CTR1(KTR_CXGBE, "%s: fell out of DDP mode", 1641 __func__); 1642 1643 insert_ddp_data(toep, ddp_placed); 1644 } 1645 } 1646 1647 if (toep->ddp.flags & DDP_ON) { 1648 /* 1649 * CPL_RX_DATA with DDP on can only be an indicate. 
1650 * Start posting queued AIO requests via DDP. The 1651 * payload that arrived in this indicate is appended 1652 * to the socket buffer as usual. 1653 */ 1654 handle_ddp_indicate(toep); 1655 } 1656 } 1657 1658 sbappendstream_locked(sb, m, 0); 1659 rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0; 1660 if (rx_credits > 0 && sbused(sb) + tp->rcv_wnd < sb->sb_lowat) { 1661 rx_credits = send_rx_credits(sc, toep, rx_credits); 1662 tp->rcv_wnd += rx_credits; 1663 tp->rcv_adv += rx_credits; 1664 } 1665 1666 if (toep->ulp_mode == ULP_MODE_TCPDDP && toep->ddp.waiting_count > 0 && 1667 sbavail(sb) != 0) { 1668 CTR2(KTR_CXGBE, "%s: tid %u queueing AIO task", __func__, 1669 tid); 1670 ddp_queue_toep(toep); 1671 } 1672 sorwakeup_locked(so); 1673 SOCKBUF_UNLOCK_ASSERT(sb); 1674 if (toep->ulp_mode == ULP_MODE_TCPDDP) 1675 DDP_UNLOCK(toep); 1676 1677 INP_WUNLOCK(inp); 1678 CURVNET_RESTORE(); 1679 return (0); 1680 } 1681 1682 static int 1683 do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1684 { 1685 struct adapter *sc = iq->adapter; 1686 const struct cpl_fw4_ack *cpl = (const void *)(rss + 1); 1687 unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl))); 1688 struct toepcb *toep = lookup_tid(sc, tid); 1689 struct inpcb *inp; 1690 struct tcpcb *tp; 1691 struct socket *so; 1692 uint8_t credits = cpl->credits; 1693 struct ofld_tx_sdesc *txsd; 1694 int plen; 1695 #ifdef INVARIANTS 1696 unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl))); 1697 #endif 1698 1699 /* 1700 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and 1701 * now this comes back carrying the credits for the flowc. 1702 */ 1703 if (__predict_false(toep->flags & TPF_SYNQE)) { 1704 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, 1705 ("%s: credits for a synq entry %p", __func__, toep)); 1706 return (0); 1707 } 1708 1709 inp = toep->inp; 1710 1711 KASSERT(opcode == CPL_FW4_ACK, 1712 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1713 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1714 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1715 1716 INP_WLOCK(inp); 1717 1718 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) { 1719 INP_WUNLOCK(inp); 1720 return (0); 1721 } 1722 1723 KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0, 1724 ("%s: inp_flags 0x%x", __func__, inp->inp_flags)); 1725 1726 tp = intotcpcb(inp); 1727 1728 if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) { 1729 tcp_seq snd_una = be32toh(cpl->snd_una); 1730 1731 #ifdef INVARIANTS 1732 if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) { 1733 log(LOG_ERR, 1734 "%s: unexpected seq# %x for TID %u, snd_una %x\n", 1735 __func__, snd_una, toep->tid, tp->snd_una); 1736 } 1737 #endif 1738 1739 if (tp->snd_una != snd_una) { 1740 tp->snd_una = snd_una; 1741 tp->ts_recent_age = tcp_ts_getticks(); 1742 } 1743 } 1744 1745 #ifdef VERBOSE_TRACES 1746 CTR3(KTR_CXGBE, "%s: tid %d credits %u", __func__, tid, credits); 1747 #endif 1748 so = inp->inp_socket; 1749 txsd = &toep->txsd[toep->txsd_cidx]; 1750 plen = 0; 1751 while (credits) { 1752 KASSERT(credits >= txsd->tx_credits, 1753 ("%s: too many (or partial) credits", __func__)); 1754 credits -= txsd->tx_credits; 1755 toep->tx_credits += txsd->tx_credits; 1756 plen += txsd->plen; 1757 if (txsd->iv_buffer) { 1758 free(txsd->iv_buffer, M_CXGBE); 1759 txsd->iv_buffer = NULL; 1760 } 1761 txsd++; 1762 toep->txsd_avail++; 1763 KASSERT(toep->txsd_avail <= toep->txsd_total, 1764 ("%s: txsd avail > total", __func__)); 1765 
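                /* Wrap the tx descriptor consumer index at the end of the ring. */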
                if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) {
                        txsd = &toep->txsd[0];
                        toep->txsd_cidx = 0;
                }
        }

        if (toep->tx_credits == toep->tx_total) {
                toep->tx_nocompl = 0;
                toep->plen_nocompl = 0;
        }

        if (toep->flags & TPF_TX_SUSPENDED &&
            toep->tx_credits >= toep->tx_total / 4) {
#ifdef VERBOSE_TRACES
                CTR2(KTR_CXGBE, "%s: tid %d calling t4_push_frames", __func__,
                    tid);
#endif
                toep->flags &= ~TPF_TX_SUSPENDED;
                CURVNET_SET(toep->vnet);
                if (toep->ulp_mode == ULP_MODE_ISCSI)
                        t4_push_pdus(sc, toep, plen);
                else if (tls_tx_key(toep))
                        t4_push_tls_records(sc, toep, plen);
                else
                        t4_push_frames(sc, toep, plen);
                CURVNET_RESTORE();
        } else if (plen > 0) {
                struct sockbuf *sb = &so->so_snd;
                int sbu;

                SOCKBUF_LOCK(sb);
                sbu = sbused(sb);
                if (toep->ulp_mode == ULP_MODE_ISCSI) {

                        if (__predict_false(sbu > 0)) {
                                /*
                                 * The data transmitted before the tid's ULP
                                 * mode changed to ISCSI is still in so_snd.
                                 * Incoming credits should account for so_snd
                                 * first.
                                 */
                                sbdrop_locked(sb, min(sbu, plen));
                                plen -= min(sbu, plen);
                        }
                        sowwakeup_locked(so);   /* unlocks so_snd */
                        rqdrop_locked(&toep->ulp_pdu_reclaimq, plen);
                } else {
#ifdef VERBOSE_TRACES
                        CTR3(KTR_CXGBE, "%s: tid %d dropped %d bytes", __func__,
                            tid, plen);
#endif
                        sbdrop_locked(sb, plen);
                        if (tls_tx_key(toep)) {
                                struct tls_ofld_info *tls_ofld = &toep->tls;

                                MPASS(tls_ofld->sb_off >= plen);
                                tls_ofld->sb_off -= plen;
                        }
                        if (!TAILQ_EMPTY(&toep->aiotx_jobq))
                                t4_aiotx_queue_toep(toep);
                        sowwakeup_locked(so);   /* unlocks so_snd */
                }
                SOCKBUF_UNLOCK_ASSERT(sb);
        }

        INP_WUNLOCK(inp);

        return (0);
}

void
t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, struct toepcb *toep,
    uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie)
{
        struct wrqe *wr;
        struct cpl_set_tcb_field *req;
        struct ofld_tx_sdesc *txsd;

        MPASS((cookie & ~M_COOKIE) == 0);
        if (reply) {
                MPASS(cookie != CPL_COOKIE_RESERVED);
        }

        wr = alloc_wrqe(sizeof(*req), wrq);
        if (wr == NULL) {
                /* XXX */
                panic("%s: allocation failure.", __func__);
        }
        req = wrtod(wr);

        INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
        req->reply_ctrl = htobe16(V_QUEUENO(toep->ofld_rxq->iq.abs_id));
        if (reply == 0)
                req->reply_ctrl |= htobe16(F_NO_REPLY);
        req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie));
        req->mask = htobe64(mask);
        req->val = htobe64(val);
        if ((wrq->eq.flags & EQ_TYPEMASK) == EQ_OFLD) {
                txsd = &toep->txsd[toep->txsd_pidx];
                txsd->tx_credits = howmany(sizeof(*req), 16);
                txsd->plen = 0;
                KASSERT(toep->tx_credits >= txsd->tx_credits &&
                    toep->txsd_avail > 0,
                    ("%s: not enough credits (%d)", __func__,
                    toep->tx_credits));
                toep->tx_credits -= txsd->tx_credits;
                if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
                        toep->txsd_pidx = 0;
                toep->txsd_avail--;
        }

        t4_wrq_tx(sc, wr);
}

void
t4_init_cpl_io_handlers(void)
{

        t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close);
        t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl);
        t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req);
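        /*
         * CPL_ABORT_RPL_RSS and CPL_FW4_ACK are shared with other consumers
         * in the driver, so TOM registers for them with its own cookie
         * instead of claiming the opcodes outright.
         */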
t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl, 1887 CPL_COOKIE_TOM); 1888 t4_register_cpl_handler(CPL_RX_DATA, do_rx_data); 1889 t4_register_shared_cpl_handler(CPL_FW4_ACK, do_fw4_ack, CPL_COOKIE_TOM); 1890 } 1891 1892 void 1893 t4_uninit_cpl_io_handlers(void) 1894 { 1895 1896 t4_register_cpl_handler(CPL_PEER_CLOSE, NULL); 1897 t4_register_cpl_handler(CPL_CLOSE_CON_RPL, NULL); 1898 t4_register_cpl_handler(CPL_ABORT_REQ_RSS, NULL); 1899 t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS, NULL, CPL_COOKIE_TOM); 1900 t4_register_cpl_handler(CPL_RX_DATA, NULL); 1901 t4_register_shared_cpl_handler(CPL_FW4_ACK, NULL, CPL_COOKIE_TOM); 1902 } 1903 1904 /* 1905 * Use the 'backend3' field in AIO jobs to store the amount of data 1906 * sent by the AIO job so far and the 'backend4' field to hold an 1907 * error that should be reported when the job is completed. 1908 */ 1909 #define aio_sent backend3 1910 #define aio_error backend4 1911 1912 #define jobtotid(job) \ 1913 (((struct toepcb *)(so_sototcpcb((job)->fd_file->f_data)->t_toe))->tid) 1914 1915 static void 1916 free_aiotx_buffer(struct aiotx_buffer *ab) 1917 { 1918 struct kaiocb *job; 1919 long status; 1920 int error; 1921 1922 if (refcount_release(&ab->refcount) == 0) 1923 return; 1924 1925 job = ab->job; 1926 error = job->aio_error; 1927 status = job->aio_sent; 1928 vm_page_unhold_pages(ab->ps.pages, ab->ps.npages); 1929 free(ab, M_CXGBE); 1930 #ifdef VERBOSE_TRACES 1931 CTR5(KTR_CXGBE, "%s: tid %d completed %p len %ld, error %d", __func__, 1932 jobtotid(job), job, status, error); 1933 #endif 1934 if (error == ECANCELED && status != 0) 1935 error = 0; 1936 if (error == ECANCELED) 1937 aio_cancel(job); 1938 else if (error) 1939 aio_complete(job, -1, error); 1940 else 1941 aio_complete(job, status, 0); 1942 } 1943 1944 static void 1945 t4_aiotx_mbuf_free(struct mbuf *m) 1946 { 1947 struct aiotx_buffer *ab = m->m_ext.ext_arg1; 1948 1949 #ifdef VERBOSE_TRACES 1950 CTR3(KTR_CXGBE, "%s: completed %d bytes for tid %d", __func__, 1951 m->m_len, jobtotid(ab->job)); 1952 #endif 1953 free_aiotx_buffer(ab); 1954 } 1955 1956 /* 1957 * Hold the buffer backing an AIO request and return an AIO transmit 1958 * buffer. 1959 */ 1960 static int 1961 hold_aio(struct kaiocb *job) 1962 { 1963 struct aiotx_buffer *ab; 1964 struct vmspace *vm; 1965 vm_map_t map; 1966 vm_offset_t start, end, pgoff; 1967 int n; 1968 1969 MPASS(job->backend1 == NULL); 1970 1971 /* 1972 * The AIO subsystem will cancel and drain all requests before 1973 * permitting a process to exit or exec, so p_vmspace should 1974 * be stable here. 
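         * The pages are held by vm_fault_quick_hold_pages() below and are
         * released by free_aiotx_buffer() once the job and all in-flight
         * mbufs have dropped their references.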
	 */
	vm = job->userproc->p_vmspace;
	map = &vm->vm_map;
	start = (uintptr_t)job->uaiocb.aio_buf;
	pgoff = start & PAGE_MASK;
	end = round_page(start + job->uaiocb.aio_nbytes);
	start = trunc_page(start);
	n = atop(end - start);

	ab = malloc(sizeof(*ab) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK |
	    M_ZERO);
	refcount_init(&ab->refcount, 1);
	ab->ps.pages = (vm_page_t *)(ab + 1);
	ab->ps.npages = vm_fault_quick_hold_pages(map, start, end - start,
	    VM_PROT_WRITE, ab->ps.pages, n);
	if (ab->ps.npages < 0) {
		free(ab, M_CXGBE);
		return (EFAULT);
	}

	KASSERT(ab->ps.npages == n,
	    ("hold_aio: page count mismatch: %d vs %d", ab->ps.npages, n));

	ab->ps.offset = pgoff;
	ab->ps.len = job->uaiocb.aio_nbytes;
	ab->job = job;
	job->backend1 = ab;
#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d",
	    __func__, jobtotid(job), &ab->ps, job, ab->ps.npages);
#endif
	return (0);
}

static void
t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job)
{
	struct adapter *sc;
	struct sockbuf *sb;
	struct file *fp;
	struct aiotx_buffer *ab;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct mbuf *m;
	int error;
	bool moretocome, sendmore;

	sc = td_adapter(toep->td);
	sb = &so->so_snd;
	SOCKBUF_UNLOCK(sb);
	fp = job->fd_file;
	ab = job->backend1;
	m = NULL;

#ifdef MAC
	error = mac_socket_check_send(fp->f_cred, so);
	if (error != 0)
		goto out;
#endif

	if (ab == NULL) {
		error = hold_aio(job);
		if (error != 0)
			goto out;
		ab = job->backend1;
	}

	/* Inline sosend_generic(). */

	job->msgsnd = 1;

	error = sblock(sb, SBL_WAIT);
	MPASS(error == 0);

sendanother:
	m = m_get(M_WAITOK, MT_DATA);

	SOCKBUF_LOCK(sb);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		if ((so->so_options & SO_NOSIGPIPE) == 0) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		error = ENOTCONN;
		goto out;
	}
	if (sbspace(sb) < sb->sb_lowat) {
		MPASS(job->aio_sent == 0 || !(so->so_state & SS_NBIO));

		/*
		 * Don't block if there is too little room in the socket
		 * buffer.  Instead, requeue the request.
		 */
		if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
			SOCKBUF_UNLOCK(sb);
			sbunlock(sb);
			error = ECANCELED;
			goto out;
		}
		TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		goto out;
	}

	/*
	 * Write as much data as the socket permits, but no more than a
	 * single sndbuf at a time.
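	 * sc->tt.sndbuf caps the size of each chunk; if the request does
	 * not fit in one chunk, 'sendmore' triggers another pass through
	 * the sendanother loop below.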
	 */
	m->m_len = sbspace(sb);
	if (m->m_len > ab->ps.len - job->aio_sent) {
		m->m_len = ab->ps.len - job->aio_sent;
		moretocome = false;
	} else
		moretocome = true;
	if (m->m_len > sc->tt.sndbuf) {
		m->m_len = sc->tt.sndbuf;
		sendmore = true;
	} else
		sendmore = false;

	if (!TAILQ_EMPTY(&toep->aiotx_jobq))
		moretocome = true;
	SOCKBUF_UNLOCK(sb);
	MPASS(m->m_len != 0);

	/* Inlined tcp_usr_send(). */

	inp = toep->inp;
	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		sbunlock(sb);
		error = ECONNRESET;
		goto out;
	}

	refcount_acquire(&ab->refcount);
	m_extadd(m, NULL, ab->ps.len, t4_aiotx_mbuf_free, ab,
	    (void *)(uintptr_t)job->aio_sent, 0, EXT_NET_DRV);
	m->m_ext.ext_flags |= EXT_FLAG_AIOTX;
	job->aio_sent += m->m_len;

	sbappendstream(sb, m, 0);
	m = NULL;

	if (!(inp->inp_flags & INP_DROPPED)) {
		tp = intotcpcb(inp);
		if (moretocome)
			tp->t_flags |= TF_MORETOCOME;
		error = tp->t_fb->tfb_tcp_output(tp);
		if (moretocome)
			tp->t_flags &= ~TF_MORETOCOME;
	}

	INP_WUNLOCK(inp);
	if (sendmore)
		goto sendanother;
	sbunlock(sb);

	if (error)
		goto out;

	/*
	 * If this is a blocking socket and the request has not been
	 * fully completed, requeue it until the socket is ready
	 * again.
	 */
	if (job->aio_sent < job->uaiocb.aio_nbytes &&
	    !(so->so_state & SS_NBIO)) {
		SOCKBUF_LOCK(sb);
		if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
			SOCKBUF_UNLOCK(sb);
			error = ECANCELED;
			goto out;
		}
		TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
		return;
	}

	/*
	 * If the request will not be requeued, drop a reference on
	 * the aiotx buffer.  Any mbufs in flight should still
	 * contain a reference, but this drops the reference that the
	 * job owns while it is waiting to queue mbufs to the socket.
	 */
	free_aiotx_buffer(ab);

out:
	if (error) {
		if (ab != NULL) {
			job->aio_error = error;
			free_aiotx_buffer(ab);
		} else {
			MPASS(job->aio_sent == 0);
			aio_complete(job, -1, error);
		}
	}
	if (m != NULL)
		m_free(m);
	SOCKBUF_LOCK(sb);
}

static void
t4_aiotx_task(void *context, int pending)
{
	struct toepcb *toep = context;
	struct inpcb *inp = toep->inp;
	struct socket *so = inp->inp_socket;
	struct kaiocb *job;

	CURVNET_SET(toep->vnet);
	SOCKBUF_LOCK(&so->so_snd);
	while (!TAILQ_EMPTY(&toep->aiotx_jobq) && sowriteable(so)) {
		job = TAILQ_FIRST(&toep->aiotx_jobq);
		TAILQ_REMOVE(&toep->aiotx_jobq, job, list);
		if (!aio_clear_cancel_function(job))
			continue;

		t4_aiotx_process_job(toep, so, job);
	}
	toep->aiotx_task_active = false;
	SOCKBUF_UNLOCK(&so->so_snd);
	CURVNET_RESTORE();

	free_toepcb(toep);
}

static void
t4_aiotx_queue_toep(struct toepcb *toep)
{

	SOCKBUF_LOCK_ASSERT(&toep->inp->inp_socket->so_snd);
#ifdef VERBOSE_TRACES
	CTR3(KTR_CXGBE, "%s: queueing aiotx task for tid %d, active = %s",
	    __func__, toep->tid, toep->aiotx_task_active ?
"true" : "false"); 2227 #endif 2228 if (toep->aiotx_task_active) 2229 return; 2230 toep->aiotx_task_active = true; 2231 hold_toepcb(toep); 2232 soaio_enqueue(&toep->aiotx_task); 2233 } 2234 2235 static void 2236 t4_aiotx_cancel(struct kaiocb *job) 2237 { 2238 struct aiotx_buffer *ab; 2239 struct socket *so; 2240 struct sockbuf *sb; 2241 struct tcpcb *tp; 2242 struct toepcb *toep; 2243 2244 so = job->fd_file->f_data; 2245 tp = so_sototcpcb(so); 2246 toep = tp->t_toe; 2247 MPASS(job->uaiocb.aio_lio_opcode == LIO_WRITE); 2248 sb = &so->so_snd; 2249 2250 SOCKBUF_LOCK(sb); 2251 if (!aio_cancel_cleared(job)) 2252 TAILQ_REMOVE(&toep->aiotx_jobq, job, list); 2253 SOCKBUF_UNLOCK(sb); 2254 2255 ab = job->backend1; 2256 if (ab != NULL) 2257 free_aiotx_buffer(ab); 2258 else 2259 aio_cancel(job); 2260 } 2261 2262 int 2263 t4_aio_queue_aiotx(struct socket *so, struct kaiocb *job) 2264 { 2265 struct tcpcb *tp = so_sototcpcb(so); 2266 struct toepcb *toep = tp->t_toe; 2267 struct adapter *sc = td_adapter(toep->td); 2268 2269 /* This only handles writes. */ 2270 if (job->uaiocb.aio_lio_opcode != LIO_WRITE) 2271 return (EOPNOTSUPP); 2272 2273 if (!sc->tt.tx_zcopy) 2274 return (EOPNOTSUPP); 2275 2276 if (tls_tx_key(toep)) 2277 return (EOPNOTSUPP); 2278 2279 SOCKBUF_LOCK(&so->so_snd); 2280 #ifdef VERBOSE_TRACES 2281 CTR2(KTR_CXGBE, "%s: queueing %p", __func__, job); 2282 #endif 2283 if (!aio_set_cancel_function(job, t4_aiotx_cancel)) 2284 panic("new job was cancelled"); 2285 TAILQ_INSERT_TAIL(&toep->aiotx_jobq, job, list); 2286 if (sowriteable(so)) 2287 t4_aiotx_queue_toep(toep); 2288 SOCKBUF_UNLOCK(&so->so_snd); 2289 return (0); 2290 } 2291 2292 void 2293 aiotx_init_toep(struct toepcb *toep) 2294 { 2295 2296 TAILQ_INIT(&toep->aiotx_jobq); 2297 TASK_INIT(&toep->aiotx_task, 0, t4_aiotx_task, toep); 2298 } 2299 #endif 2300