1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2012, 2015 Chelsio Communications, Inc. 5 * All rights reserved. 6 * Written by: Navdeep Parhar <np@FreeBSD.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_inet.h" 34 #include "opt_inet6.h" 35 #include "opt_ratelimit.h" 36 37 #ifdef TCP_OFFLOAD 38 #include <sys/param.h> 39 #include <sys/aio.h> 40 #include <sys/file.h> 41 #include <sys/kernel.h> 42 #include <sys/ktr.h> 43 #include <sys/module.h> 44 #include <sys/proc.h> 45 #include <sys/protosw.h> 46 #include <sys/domain.h> 47 #include <sys/socket.h> 48 #include <sys/socketvar.h> 49 #include <sys/sglist.h> 50 #include <sys/taskqueue.h> 51 #include <netinet/in.h> 52 #include <netinet/in_pcb.h> 53 #include <netinet/ip.h> 54 #include <netinet/ip6.h> 55 #define TCPSTATES 56 #include <netinet/tcp_fsm.h> 57 #include <netinet/tcp_seq.h> 58 #include <netinet/tcp_var.h> 59 #include <netinet/toecore.h> 60 61 #include <security/mac/mac_framework.h> 62 63 #include <vm/vm.h> 64 #include <vm/vm_extern.h> 65 #include <vm/pmap.h> 66 #include <vm/vm_map.h> 67 #include <vm/vm_page.h> 68 69 #include "common/common.h" 70 #include "common/t4_msg.h" 71 #include "common/t4_regs.h" 72 #include "common/t4_tcb.h" 73 #include "tom/t4_tom_l2t.h" 74 #include "tom/t4_tom.h" 75 76 static void t4_aiotx_cancel(struct kaiocb *job); 77 static void t4_aiotx_queue_toep(struct socket *so, struct toepcb *toep); 78 79 void 80 send_flowc_wr(struct toepcb *toep, struct tcpcb *tp) 81 { 82 struct wrqe *wr; 83 struct fw_flowc_wr *flowc; 84 unsigned int nparams, flowclen, paramidx; 85 struct vi_info *vi = toep->vi; 86 struct port_info *pi = vi->pi; 87 struct adapter *sc = pi->adapter; 88 unsigned int pfvf = sc->pf << S_FW_VIID_PFN; 89 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 90 91 KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT), 92 ("%s: flowc for tid %u sent already", __func__, toep->tid)); 93 94 if (tp != NULL) 95 nparams = 8; 96 else 97 nparams = 6; 98 if (ulp_mode(toep) == ULP_MODE_TLS) 99 nparams++; 100 if (toep->tls.fcplenmax != 0) 101 nparams++; 102 if (toep->params.tc_idx != -1) { 103 MPASS(toep->params.tc_idx >= 0 && 104 toep->params.tc_idx < 
sc->chip_params->nsched_cls); 105 nparams++; 106 } 107 108 flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval); 109 110 wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq); 111 if (wr == NULL) { 112 /* XXX */ 113 panic("%s: allocation failure.", __func__); 114 } 115 flowc = wrtod(wr); 116 memset(flowc, 0, wr->wr_len); 117 118 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 119 V_FW_FLOWC_WR_NPARAMS(nparams)); 120 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) | 121 V_FW_WR_FLOWID(toep->tid)); 122 123 #define FLOWC_PARAM(__m, __v) \ 124 do { \ 125 flowc->mnemval[paramidx].mnemonic = FW_FLOWC_MNEM_##__m; \ 126 flowc->mnemval[paramidx].val = htobe32(__v); \ 127 paramidx++; \ 128 } while (0) 129 130 paramidx = 0; 131 132 FLOWC_PARAM(PFNVFN, pfvf); 133 FLOWC_PARAM(CH, pi->tx_chan); 134 FLOWC_PARAM(PORT, pi->tx_chan); 135 FLOWC_PARAM(IQID, toep->ofld_rxq->iq.abs_id); 136 FLOWC_PARAM(SNDBUF, toep->params.sndbuf); 137 FLOWC_PARAM(MSS, toep->params.emss); 138 if (tp) { 139 FLOWC_PARAM(SNDNXT, tp->snd_nxt); 140 FLOWC_PARAM(RCVNXT, tp->rcv_nxt); 141 } 142 CTR6(KTR_CXGBE, 143 "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x", 144 __func__, toep->tid, toep->params.emss, toep->params.sndbuf, 145 tp ? tp->snd_nxt : 0, tp ? tp->rcv_nxt : 0); 146 147 if (ulp_mode(toep) == ULP_MODE_TLS) 148 FLOWC_PARAM(ULP_MODE, ulp_mode(toep)); 149 if (toep->tls.fcplenmax != 0) 150 FLOWC_PARAM(TXDATAPLEN_MAX, toep->tls.fcplenmax); 151 if (toep->params.tc_idx != -1) 152 FLOWC_PARAM(SCHEDCLASS, toep->params.tc_idx); 153 #undef FLOWC_PARAM 154 155 KASSERT(paramidx == nparams, ("nparams mismatch")); 156 157 txsd->tx_credits = howmany(flowclen, 16); 158 txsd->plen = 0; 159 KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0, 160 ("%s: not enough credits (%d)", __func__, toep->tx_credits)); 161 toep->tx_credits -= txsd->tx_credits; 162 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) 163 toep->txsd_pidx = 0; 164 toep->txsd_avail--; 165 166 toep->flags |= TPF_FLOWC_WR_SENT; 167 t4_wrq_tx(sc, wr); 168 } 169 170 #ifdef RATELIMIT 171 /* 172 * Input is Bytes/second (so_max_pacing_rate), chip counts in Kilobits/second. 
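 *
 * A worked example of the conversion done below: a pacing rate of
 * 1250000 B/s becomes 1250000 * 8 / 1000 = 10000 Kbps, i.e. a 10 Mbps
 * scheduling class.  A rate of 0 releases the class instead (tc_idx = -1).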
173 */ 174 static int 175 update_tx_rate_limit(struct adapter *sc, struct toepcb *toep, u_int Bps) 176 { 177 int tc_idx, rc; 178 const u_int kbps = (u_int) (uint64_t)Bps * 8ULL / 1000; 179 const int port_id = toep->vi->pi->port_id; 180 181 CTR3(KTR_CXGBE, "%s: tid %u, rate %uKbps", __func__, toep->tid, kbps); 182 183 if (kbps == 0) { 184 /* unbind */ 185 tc_idx = -1; 186 } else { 187 rc = t4_reserve_cl_rl_kbps(sc, port_id, kbps, &tc_idx); 188 if (rc != 0) 189 return (rc); 190 MPASS(tc_idx >= 0 && tc_idx < sc->chip_params->nsched_cls); 191 } 192 193 if (toep->params.tc_idx != tc_idx) { 194 struct wrqe *wr; 195 struct fw_flowc_wr *flowc; 196 int nparams = 1, flowclen, flowclen16; 197 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 198 199 flowclen = sizeof(*flowc) + nparams * sizeof(struct 200 fw_flowc_mnemval); 201 flowclen16 = howmany(flowclen, 16); 202 if (toep->tx_credits < flowclen16 || toep->txsd_avail == 0 || 203 (wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq)) == NULL) { 204 if (tc_idx >= 0) 205 t4_release_cl_rl(sc, port_id, tc_idx); 206 return (ENOMEM); 207 } 208 209 flowc = wrtod(wr); 210 memset(flowc, 0, wr->wr_len); 211 212 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 213 V_FW_FLOWC_WR_NPARAMS(nparams)); 214 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(flowclen16) | 215 V_FW_WR_FLOWID(toep->tid)); 216 217 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; 218 if (tc_idx == -1) 219 flowc->mnemval[0].val = htobe32(0xff); 220 else 221 flowc->mnemval[0].val = htobe32(tc_idx); 222 223 txsd->tx_credits = flowclen16; 224 txsd->plen = 0; 225 toep->tx_credits -= txsd->tx_credits; 226 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) 227 toep->txsd_pidx = 0; 228 toep->txsd_avail--; 229 t4_wrq_tx(sc, wr); 230 } 231 232 if (toep->params.tc_idx >= 0) 233 t4_release_cl_rl(sc, port_id, toep->params.tc_idx); 234 toep->params.tc_idx = tc_idx; 235 236 return (0); 237 } 238 #endif 239 240 void 241 send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt) 242 { 243 struct wrqe *wr; 244 struct cpl_abort_req *req; 245 int tid = toep->tid; 246 struct inpcb *inp = toep->inp; 247 struct tcpcb *tp = intotcpcb(inp); /* don't use if INP_DROPPED */ 248 249 INP_WLOCK_ASSERT(inp); 250 251 CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s", 252 __func__, toep->tid, 253 inp->inp_flags & INP_DROPPED ? "inp dropped" : 254 tcpstates[tp->t_state], 255 toep->flags, inp->inp_flags, 256 toep->flags & TPF_ABORT_SHUTDOWN ? 257 " (abort already in progress)" : ""); 258 259 if (toep->flags & TPF_ABORT_SHUTDOWN) 260 return; /* abort already in progress */ 261 262 toep->flags |= TPF_ABORT_SHUTDOWN; 263 264 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 265 ("%s: flowc_wr not sent for tid %d.", __func__, tid)); 266 267 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 268 if (wr == NULL) { 269 /* XXX */ 270 panic("%s: allocation failure.", __func__); 271 } 272 req = wrtod(wr); 273 274 INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid); 275 if (inp->inp_flags & INP_DROPPED) 276 req->rsvd0 = htobe32(snd_nxt); 277 else 278 req->rsvd0 = htobe32(tp->snd_nxt); 279 req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT); 280 req->cmd = CPL_ABORT_SEND_RST; 281 282 /* 283 * XXX: What's the correct way to tell that the inp hasn't been detached 284 * from its socket? Should I even be flushing the snd buffer here? 285 */ 286 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 287 struct socket *so = inp->inp_socket; 288 289 if (so != NULL) /* because I'm not sure. 
See comment above */ 290 sbflush(&so->so_snd); 291 } 292 293 t4_l2t_send(sc, wr, toep->l2te); 294 } 295 296 /* 297 * Called when a connection is established to translate the TCP options 298 * reported by HW to FreeBSD's native format. 299 */ 300 static void 301 assign_rxopt(struct tcpcb *tp, uint16_t opt) 302 { 303 struct toepcb *toep = tp->t_toe; 304 struct inpcb *inp = tp->t_inpcb; 305 struct adapter *sc = td_adapter(toep->td); 306 307 INP_LOCK_ASSERT(inp); 308 309 toep->params.mtu_idx = G_TCPOPT_MSS(opt); 310 tp->t_maxseg = sc->params.mtus[toep->params.mtu_idx]; 311 if (inp->inp_inc.inc_flags & INC_ISIPV6) 312 tp->t_maxseg -= sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 313 else 314 tp->t_maxseg -= sizeof(struct ip) + sizeof(struct tcphdr); 315 316 toep->params.emss = tp->t_maxseg; 317 if (G_TCPOPT_TSTAMP(opt)) { 318 toep->params.tstamp = 1; 319 toep->params.emss -= TCPOLEN_TSTAMP_APPA; 320 tp->t_flags |= TF_RCVD_TSTMP; /* timestamps ok */ 321 tp->ts_recent = 0; /* hmmm */ 322 tp->ts_recent_age = tcp_ts_getticks(); 323 } else 324 toep->params.tstamp = 0; 325 326 if (G_TCPOPT_SACK(opt)) { 327 toep->params.sack = 1; 328 tp->t_flags |= TF_SACK_PERMIT; /* should already be set */ 329 } else { 330 toep->params.sack = 0; 331 tp->t_flags &= ~TF_SACK_PERMIT; /* sack disallowed by peer */ 332 } 333 334 if (G_TCPOPT_WSCALE_OK(opt)) 335 tp->t_flags |= TF_RCVD_SCALE; 336 337 /* Doing window scaling? */ 338 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 339 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 340 tp->rcv_scale = tp->request_r_scale; 341 tp->snd_scale = G_TCPOPT_SND_WSCALE(opt); 342 } else 343 toep->params.wscale = 0; 344 345 CTR6(KTR_CXGBE, 346 "assign_rxopt: tid %d, mtu_idx %u, emss %u, ts %u, sack %u, wscale %u", 347 toep->tid, toep->params.mtu_idx, toep->params.emss, 348 toep->params.tstamp, toep->params.sack, toep->params.wscale); 349 } 350 351 /* 352 * Completes some final bits of initialization for just established connections 353 * and changes their state to TCPS_ESTABLISHED. 354 * 355 * The ISNs are from the exchange of SYNs. 
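 *
 * Note on the sequence numbers set below: the SYN consumes one sequence
 * number, so snd_una/snd_nxt/snd_max all start at iss + 1, while rcv_nxt
 * is seeded from irs by tcp_rcvseqinit().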
356 */ 357 void 358 make_established(struct toepcb *toep, uint32_t iss, uint32_t irs, uint16_t opt) 359 { 360 struct inpcb *inp = toep->inp; 361 struct socket *so = inp->inp_socket; 362 struct tcpcb *tp = intotcpcb(inp); 363 uint16_t tcpopt = be16toh(opt); 364 365 INP_WLOCK_ASSERT(inp); 366 KASSERT(tp->t_state == TCPS_SYN_SENT || 367 tp->t_state == TCPS_SYN_RECEIVED, 368 ("%s: TCP state %s", __func__, tcpstates[tp->t_state])); 369 370 CTR6(KTR_CXGBE, "%s: tid %d, so %p, inp %p, tp %p, toep %p", 371 __func__, toep->tid, so, inp, tp, toep); 372 373 tcp_state_change(tp, TCPS_ESTABLISHED); 374 tp->t_starttime = ticks; 375 TCPSTAT_INC(tcps_connects); 376 377 tp->irs = irs; 378 tcp_rcvseqinit(tp); 379 tp->rcv_wnd = (u_int)toep->params.opt0_bufsize << 10; 380 tp->rcv_adv += tp->rcv_wnd; 381 tp->last_ack_sent = tp->rcv_nxt; 382 383 tp->iss = iss; 384 tcp_sendseqinit(tp); 385 tp->snd_una = iss + 1; 386 tp->snd_nxt = iss + 1; 387 tp->snd_max = iss + 1; 388 389 assign_rxopt(tp, tcpopt); 390 send_flowc_wr(toep, tp); 391 392 soisconnected(so); 393 } 394 395 int 396 send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits) 397 { 398 struct wrqe *wr; 399 struct cpl_rx_data_ack *req; 400 uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1); 401 402 KASSERT(credits >= 0, ("%s: %d credits", __func__, credits)); 403 404 wr = alloc_wrqe(sizeof(*req), toep->ctrlq); 405 if (wr == NULL) 406 return (0); 407 req = wrtod(wr); 408 409 INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid); 410 req->credit_dack = htobe32(dack | V_RX_CREDITS(credits)); 411 412 t4_wrq_tx(sc, wr); 413 return (credits); 414 } 415 416 void 417 send_rx_modulate(struct adapter *sc, struct toepcb *toep) 418 { 419 struct wrqe *wr; 420 struct cpl_rx_data_ack *req; 421 422 wr = alloc_wrqe(sizeof(*req), toep->ctrlq); 423 if (wr == NULL) 424 return; 425 req = wrtod(wr); 426 427 INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid); 428 req->credit_dack = htobe32(F_RX_MODULATE_RX); 429 430 t4_wrq_tx(sc, wr); 431 } 432 433 void 434 t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp) 435 { 436 struct adapter *sc = tod->tod_softc; 437 struct inpcb *inp = tp->t_inpcb; 438 struct socket *so = inp->inp_socket; 439 struct sockbuf *sb = &so->so_rcv; 440 struct toepcb *toep = tp->t_toe; 441 int rx_credits; 442 443 INP_WLOCK_ASSERT(inp); 444 SOCKBUF_LOCK_ASSERT(sb); 445 446 rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0; 447 if (ulp_mode(toep) == ULP_MODE_TLS) { 448 if (toep->tls.rcv_over >= rx_credits) { 449 toep->tls.rcv_over -= rx_credits; 450 rx_credits = 0; 451 } else { 452 rx_credits -= toep->tls.rcv_over; 453 toep->tls.rcv_over = 0; 454 } 455 } 456 457 if (rx_credits > 0 && 458 (tp->rcv_wnd <= 32 * 1024 || rx_credits >= 64 * 1024 || 459 (rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) || 460 sbused(sb) + tp->rcv_wnd < sb->sb_lowat)) { 461 rx_credits = send_rx_credits(sc, toep, rx_credits); 462 tp->rcv_wnd += rx_credits; 463 tp->rcv_adv += rx_credits; 464 } else if (toep->flags & TPF_FORCE_CREDITS) 465 send_rx_modulate(sc, toep); 466 } 467 468 void 469 t4_rcvd(struct toedev *tod, struct tcpcb *tp) 470 { 471 struct inpcb *inp = tp->t_inpcb; 472 struct socket *so = inp->inp_socket; 473 struct sockbuf *sb = &so->so_rcv; 474 475 SOCKBUF_LOCK(sb); 476 t4_rcvd_locked(tod, tp); 477 SOCKBUF_UNLOCK(sb); 478 } 479 480 /* 481 * Close a connection by sending a CPL_CLOSE_CON_REQ message. 
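 *
 * TPF_FIN_SENT makes repeated calls harmless: the request is built and
 * queued only once, and TPF_SEND_FIN is cleared at that point.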
482 */ 483 int 484 t4_close_conn(struct adapter *sc, struct toepcb *toep) 485 { 486 struct wrqe *wr; 487 struct cpl_close_con_req *req; 488 unsigned int tid = toep->tid; 489 490 CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid, 491 toep->flags & TPF_FIN_SENT ? ", IGNORED" : ""); 492 493 if (toep->flags & TPF_FIN_SENT) 494 return (0); 495 496 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 497 ("%s: flowc_wr not sent for tid %u.", __func__, tid)); 498 499 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 500 if (wr == NULL) { 501 /* XXX */ 502 panic("%s: allocation failure.", __func__); 503 } 504 req = wrtod(wr); 505 506 req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) | 507 V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr))); 508 req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) | 509 V_FW_WR_FLOWID(tid)); 510 req->wr.wr_lo = cpu_to_be64(0); 511 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); 512 req->rsvd = 0; 513 514 toep->flags |= TPF_FIN_SENT; 515 toep->flags &= ~TPF_SEND_FIN; 516 t4_l2t_send(sc, wr, toep->l2te); 517 518 return (0); 519 } 520 521 #define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16) 522 #define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16)) 523 524 /* Maximum amount of immediate data we could stuff in a WR */ 525 static inline int 526 max_imm_payload(int tx_credits) 527 { 528 const int n = 1; /* Use no more than one desc for imm. data WR */ 529 530 KASSERT(tx_credits >= 0 && 531 tx_credits <= MAX_OFLD_TX_CREDITS, 532 ("%s: %d credits", __func__, tx_credits)); 533 534 if (tx_credits < MIN_OFLD_TX_CREDITS) 535 return (0); 536 537 if (tx_credits >= (n * EQ_ESIZE) / 16) 538 return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr)); 539 else 540 return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr)); 541 } 542 543 /* Maximum number of SGL entries we could stuff in a WR */ 544 static inline int 545 max_dsgl_nsegs(int tx_credits) 546 { 547 int nseg = 1; /* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */ 548 int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS; 549 550 KASSERT(tx_credits >= 0 && 551 tx_credits <= MAX_OFLD_TX_CREDITS, 552 ("%s: %d credits", __func__, tx_credits)); 553 554 if (tx_credits < MIN_OFLD_TX_CREDITS) 555 return (0); 556 557 nseg += 2 * (sge_pair_credits * 16 / 24); 558 if ((sge_pair_credits * 16) % 24 == 16) 559 nseg++; 560 561 return (nseg); 562 } 563 564 static inline void 565 write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen, 566 unsigned int plen, uint8_t credits, int shove, int ulp_submode) 567 { 568 struct fw_ofld_tx_data_wr *txwr = dst; 569 570 txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) | 571 V_FW_WR_IMMDLEN(immdlen)); 572 txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) | 573 V_FW_WR_LEN16(credits)); 574 txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ulp_mode(toep)) | 575 V_TX_ULP_SUBMODE(ulp_submode) | V_TX_URG(0) | V_TX_SHOVE(shove)); 576 txwr->plen = htobe32(plen); 577 578 if (toep->params.tx_align > 0) { 579 if (plen < 2 * toep->params.emss) 580 txwr->lsodisable_to_flags |= 581 htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE); 582 else 583 txwr->lsodisable_to_flags |= 584 htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD | 585 (toep->params.nagle == 0 ? 0 : 586 F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE)); 587 } 588 } 589 590 /* 591 * Generate a DSGL from a starting mbuf. The total number of segments and the 592 * maximum segments in any one mbuf are provided. 
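 *
 * A small worked example: with nsegs = 5 the first segment lands in
 * len0/addr0 and the remaining four are packed two per ulptx_sge_pair.
 * With an even nsegs the last pair is only half used and its unused len
 * slot is zeroed at the end of this function.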
593 */ 594 static void 595 write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n) 596 { 597 struct mbuf *m; 598 struct ulptx_sgl *usgl = dst; 599 int i, j, rc; 600 struct sglist sg; 601 struct sglist_seg segs[n]; 602 603 KASSERT(nsegs > 0, ("%s: nsegs 0", __func__)); 604 605 sglist_init(&sg, n, segs); 606 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 607 V_ULPTX_NSGE(nsegs)); 608 609 i = -1; 610 for (m = start; m != stop; m = m->m_next) { 611 if (m->m_flags & M_NOMAP) 612 rc = sglist_append_mb_ext_pgs(&sg, m); 613 else 614 rc = sglist_append(&sg, mtod(m, void *), m->m_len); 615 if (__predict_false(rc != 0)) 616 panic("%s: sglist_append %d", __func__, rc); 617 618 for (j = 0; j < sg.sg_nseg; i++, j++) { 619 if (i < 0) { 620 usgl->len0 = htobe32(segs[j].ss_len); 621 usgl->addr0 = htobe64(segs[j].ss_paddr); 622 } else { 623 usgl->sge[i / 2].len[i & 1] = 624 htobe32(segs[j].ss_len); 625 usgl->sge[i / 2].addr[i & 1] = 626 htobe64(segs[j].ss_paddr); 627 } 628 #ifdef INVARIANTS 629 nsegs--; 630 #endif 631 } 632 sglist_reset(&sg); 633 } 634 if (i & 1) 635 usgl->sge[i / 2].len[1] = htobe32(0); 636 KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p", 637 __func__, nsegs, start, stop)); 638 } 639 640 /* 641 * Max number of SGL entries an offload tx work request can have. This is 41 642 * (1 + 40) for a full 512B work request. 643 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40) 644 */ 645 #define OFLD_SGL_LEN (41) 646 647 /* 648 * Send data and/or a FIN to the peer. 649 * 650 * The socket's so_snd buffer consists of a stream of data starting with sb_mb 651 * and linked together with m_next. sb_sndptr, if set, is the last mbuf that 652 * was transmitted. 653 * 654 * drop indicates the number of bytes that should be dropped from the head of 655 * the send buffer. It is an optimization that lets do_fw4_ack avoid creating 656 * contention on the send buffer lock (before this change it used to do 657 * sowwakeup and then t4_push_frames right after that when recovering from tx 658 * stalls). When drop is set this function MUST drop the bytes and wake up any 659 * writers. 
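 *
 * Each iteration of the loop below turns a chunk of the send buffer into
 * one work request: a payload that fits in max_imm_payload() bytes (and
 * contains no unmapped mbufs) is copied into the WR as immediate data,
 * while larger payloads are described by a DSGL built by write_tx_sgl().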
660 */ 661 void 662 t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop) 663 { 664 struct mbuf *sndptr, *m, *sb_sndptr; 665 struct fw_ofld_tx_data_wr *txwr; 666 struct wrqe *wr; 667 u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf; 668 struct inpcb *inp = toep->inp; 669 struct tcpcb *tp = intotcpcb(inp); 670 struct socket *so = inp->inp_socket; 671 struct sockbuf *sb = &so->so_snd; 672 int tx_credits, shove, compl, sowwakeup; 673 struct ofld_tx_sdesc *txsd; 674 bool nomap_mbuf_seen; 675 676 INP_WLOCK_ASSERT(inp); 677 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 678 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); 679 680 KASSERT(ulp_mode(toep) == ULP_MODE_NONE || 681 ulp_mode(toep) == ULP_MODE_TCPDDP || 682 ulp_mode(toep) == ULP_MODE_TLS || 683 ulp_mode(toep) == ULP_MODE_RDMA, 684 ("%s: ulp_mode %u for toep %p", __func__, ulp_mode(toep), toep)); 685 686 #ifdef VERBOSE_TRACES 687 CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d", 688 __func__, toep->tid, toep->flags, tp->t_flags, drop); 689 #endif 690 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) 691 return; 692 693 #ifdef RATELIMIT 694 if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) && 695 (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) { 696 inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED; 697 } 698 #endif 699 700 /* 701 * This function doesn't resume by itself. Someone else must clear the 702 * flag and call this function. 703 */ 704 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { 705 KASSERT(drop == 0, 706 ("%s: drop (%d) != 0 but tx is suspended", __func__, drop)); 707 return; 708 } 709 710 txsd = &toep->txsd[toep->txsd_pidx]; 711 do { 712 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); 713 max_imm = max_imm_payload(tx_credits); 714 max_nsegs = max_dsgl_nsegs(tx_credits); 715 716 SOCKBUF_LOCK(sb); 717 sowwakeup = drop; 718 if (drop) { 719 sbdrop_locked(sb, drop); 720 drop = 0; 721 } 722 sb_sndptr = sb->sb_sndptr; 723 sndptr = sb_sndptr ? 
sb_sndptr->m_next : sb->sb_mb; 724 plen = 0; 725 nsegs = 0; 726 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ 727 nomap_mbuf_seen = false; 728 for (m = sndptr; m != NULL; m = m->m_next) { 729 int n; 730 731 if (m->m_flags & M_NOMAP) 732 n = sglist_count_mb_ext_pgs(m); 733 else 734 n = sglist_count(mtod(m, void *), m->m_len); 735 736 nsegs += n; 737 plen += m->m_len; 738 739 /* This mbuf sent us _over_ the nsegs limit, back out */ 740 if (plen > max_imm && nsegs > max_nsegs) { 741 nsegs -= n; 742 plen -= m->m_len; 743 if (plen == 0) { 744 /* Too few credits */ 745 toep->flags |= TPF_TX_SUSPENDED; 746 if (sowwakeup) { 747 if (!TAILQ_EMPTY( 748 &toep->aiotx_jobq)) 749 t4_aiotx_queue_toep(so, 750 toep); 751 sowwakeup_locked(so); 752 } else 753 SOCKBUF_UNLOCK(sb); 754 SOCKBUF_UNLOCK_ASSERT(sb); 755 return; 756 } 757 break; 758 } 759 760 if (m->m_flags & M_NOMAP) 761 nomap_mbuf_seen = true; 762 if (max_nsegs_1mbuf < n) 763 max_nsegs_1mbuf = n; 764 sb_sndptr = m; /* new sb->sb_sndptr if all goes well */ 765 766 /* This mbuf put us right at the max_nsegs limit */ 767 if (plen > max_imm && nsegs == max_nsegs) { 768 m = m->m_next; 769 break; 770 } 771 } 772 773 if (sbused(sb) > sb->sb_hiwat * 5 / 8 && 774 toep->plen_nocompl + plen >= sb->sb_hiwat / 4) 775 compl = 1; 776 else 777 compl = 0; 778 779 if (sb->sb_flags & SB_AUTOSIZE && 780 V_tcp_do_autosndbuf && 781 sb->sb_hiwat < V_tcp_autosndbuf_max && 782 sbused(sb) >= sb->sb_hiwat * 7 / 8) { 783 int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc, 784 V_tcp_autosndbuf_max); 785 786 if (!sbreserve_locked(sb, newsize, so, NULL)) 787 sb->sb_flags &= ~SB_AUTOSIZE; 788 else 789 sowwakeup = 1; /* room available */ 790 } 791 if (sowwakeup) { 792 if (!TAILQ_EMPTY(&toep->aiotx_jobq)) 793 t4_aiotx_queue_toep(so, toep); 794 sowwakeup_locked(so); 795 } else 796 SOCKBUF_UNLOCK(sb); 797 SOCKBUF_UNLOCK_ASSERT(sb); 798 799 /* nothing to send */ 800 if (plen == 0) { 801 KASSERT(m == NULL, 802 ("%s: nothing to send, but m != NULL", __func__)); 803 break; 804 } 805 806 if (__predict_false(toep->flags & TPF_FIN_SENT)) 807 panic("%s: excess tx.", __func__); 808 809 shove = m == NULL && !(tp->t_flags & TF_MORETOCOME); 810 if (plen <= max_imm && !nomap_mbuf_seen) { 811 812 /* Immediate data tx */ 813 814 wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), 815 toep->ofld_txq); 816 if (wr == NULL) { 817 /* XXX: how will we recover from this? */ 818 toep->flags |= TPF_TX_SUSPENDED; 819 return; 820 } 821 txwr = wrtod(wr); 822 credits = howmany(wr->wr_len, 16); 823 write_tx_wr(txwr, toep, plen, plen, credits, shove, 0); 824 m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); 825 nsegs = 0; 826 } else { 827 int wr_len; 828 829 /* DSGL tx */ 830 831 wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + 832 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 833 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq); 834 if (wr == NULL) { 835 /* XXX: how will we recover from this? 
*/ 836 toep->flags |= TPF_TX_SUSPENDED; 837 return; 838 } 839 txwr = wrtod(wr); 840 credits = howmany(wr_len, 16); 841 write_tx_wr(txwr, toep, 0, plen, credits, shove, 0); 842 write_tx_sgl(txwr + 1, sndptr, m, nsegs, 843 max_nsegs_1mbuf); 844 if (wr_len & 0xf) { 845 uint64_t *pad = (uint64_t *) 846 ((uintptr_t)txwr + wr_len); 847 *pad = 0; 848 } 849 } 850 851 KASSERT(toep->tx_credits >= credits, 852 ("%s: not enough credits", __func__)); 853 854 toep->tx_credits -= credits; 855 toep->tx_nocompl += credits; 856 toep->plen_nocompl += plen; 857 if (toep->tx_credits <= toep->tx_total * 3 / 8 && 858 toep->tx_nocompl >= toep->tx_total / 4) 859 compl = 1; 860 861 if (compl || ulp_mode(toep) == ULP_MODE_RDMA) { 862 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); 863 toep->tx_nocompl = 0; 864 toep->plen_nocompl = 0; 865 } 866 867 tp->snd_nxt += plen; 868 tp->snd_max += plen; 869 870 SOCKBUF_LOCK(sb); 871 KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__)); 872 sb->sb_sndptr = sb_sndptr; 873 SOCKBUF_UNLOCK(sb); 874 875 toep->flags |= TPF_TX_DATA_SENT; 876 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) 877 toep->flags |= TPF_TX_SUSPENDED; 878 879 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 880 txsd->plen = plen; 881 txsd->tx_credits = credits; 882 txsd++; 883 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 884 toep->txsd_pidx = 0; 885 txsd = &toep->txsd[0]; 886 } 887 toep->txsd_avail--; 888 889 t4_l2t_send(sc, wr, toep->l2te); 890 } while (m != NULL); 891 892 /* Send a FIN if requested, but only if there's no more data to send */ 893 if (m == NULL && toep->flags & TPF_SEND_FIN) 894 t4_close_conn(sc, toep); 895 } 896 897 static inline void 898 rqdrop_locked(struct mbufq *q, int plen) 899 { 900 struct mbuf *m; 901 902 while (plen > 0) { 903 m = mbufq_dequeue(q); 904 905 /* Too many credits. */ 906 MPASS(m != NULL); 907 M_ASSERTPKTHDR(m); 908 909 /* Partial credits. */ 910 MPASS(plen >= m->m_pkthdr.len); 911 912 plen -= m->m_pkthdr.len; 913 m_freem(m); 914 } 915 } 916 917 void 918 t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop) 919 { 920 struct mbuf *sndptr, *m; 921 struct fw_ofld_tx_data_wr *txwr; 922 struct wrqe *wr; 923 u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf; 924 u_int adjusted_plen, ulp_submode; 925 struct inpcb *inp = toep->inp; 926 struct tcpcb *tp = intotcpcb(inp); 927 int tx_credits, shove; 928 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 929 struct mbufq *pduq = &toep->ulp_pduq; 930 static const u_int ulp_extra_len[] = {0, 4, 4, 8}; 931 932 INP_WLOCK_ASSERT(inp); 933 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 934 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); 935 KASSERT(ulp_mode(toep) == ULP_MODE_ISCSI, 936 ("%s: ulp_mode %u for toep %p", __func__, ulp_mode(toep), toep)); 937 938 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) 939 return; 940 941 /* 942 * This function doesn't resume by itself. Someone else must clear the 943 * flag and call this function. 
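 * (In this file that happens in do_fw4_ack, which clears TPF_TX_SUSPENDED
 * once at least a quarter of the connection's tx credits are available
 * again and then calls back in here.)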
944 */ 945 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { 946 KASSERT(drop == 0, 947 ("%s: drop (%d) != 0 but tx is suspended", __func__, drop)); 948 return; 949 } 950 951 if (drop) 952 rqdrop_locked(&toep->ulp_pdu_reclaimq, drop); 953 954 while ((sndptr = mbufq_first(pduq)) != NULL) { 955 M_ASSERTPKTHDR(sndptr); 956 957 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); 958 max_imm = max_imm_payload(tx_credits); 959 max_nsegs = max_dsgl_nsegs(tx_credits); 960 961 plen = 0; 962 nsegs = 0; 963 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ 964 for (m = sndptr; m != NULL; m = m->m_next) { 965 int n = sglist_count(mtod(m, void *), m->m_len); 966 967 nsegs += n; 968 plen += m->m_len; 969 970 /* 971 * This mbuf would send us _over_ the nsegs limit. 972 * Suspend tx because the PDU can't be sent out. 973 */ 974 if (plen > max_imm && nsegs > max_nsegs) { 975 toep->flags |= TPF_TX_SUSPENDED; 976 return; 977 } 978 979 if (max_nsegs_1mbuf < n) 980 max_nsegs_1mbuf = n; 981 } 982 983 if (__predict_false(toep->flags & TPF_FIN_SENT)) 984 panic("%s: excess tx.", __func__); 985 986 /* 987 * We have a PDU to send. All of it goes out in one WR so 'm' 988 * is NULL. A PDU's length is always a multiple of 4. 989 */ 990 MPASS(m == NULL); 991 MPASS((plen & 3) == 0); 992 MPASS(sndptr->m_pkthdr.len == plen); 993 994 shove = !(tp->t_flags & TF_MORETOCOME); 995 ulp_submode = mbuf_ulp_submode(sndptr); 996 MPASS(ulp_submode < nitems(ulp_extra_len)); 997 998 /* 999 * plen doesn't include header and data digests, which are 1000 * generated and inserted in the right places by the TOE, but 1001 * they do occupy TCP sequence space and need to be accounted 1002 * for. 1003 */ 1004 adjusted_plen = plen + ulp_extra_len[ulp_submode]; 1005 if (plen <= max_imm) { 1006 1007 /* Immediate data tx */ 1008 1009 wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), 1010 toep->ofld_txq); 1011 if (wr == NULL) { 1012 /* XXX: how will we recover from this? */ 1013 toep->flags |= TPF_TX_SUSPENDED; 1014 return; 1015 } 1016 txwr = wrtod(wr); 1017 credits = howmany(wr->wr_len, 16); 1018 write_tx_wr(txwr, toep, plen, adjusted_plen, credits, 1019 shove, ulp_submode); 1020 m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); 1021 nsegs = 0; 1022 } else { 1023 int wr_len; 1024 1025 /* DSGL tx */ 1026 wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + 1027 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 1028 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq); 1029 if (wr == NULL) { 1030 /* XXX: how will we recover from this? 
*/ 1031 toep->flags |= TPF_TX_SUSPENDED; 1032 return; 1033 } 1034 txwr = wrtod(wr); 1035 credits = howmany(wr_len, 16); 1036 write_tx_wr(txwr, toep, 0, adjusted_plen, credits, 1037 shove, ulp_submode); 1038 write_tx_sgl(txwr + 1, sndptr, m, nsegs, 1039 max_nsegs_1mbuf); 1040 if (wr_len & 0xf) { 1041 uint64_t *pad = (uint64_t *) 1042 ((uintptr_t)txwr + wr_len); 1043 *pad = 0; 1044 } 1045 } 1046 1047 KASSERT(toep->tx_credits >= credits, 1048 ("%s: not enough credits", __func__)); 1049 1050 m = mbufq_dequeue(pduq); 1051 MPASS(m == sndptr); 1052 mbufq_enqueue(&toep->ulp_pdu_reclaimq, m); 1053 1054 toep->tx_credits -= credits; 1055 toep->tx_nocompl += credits; 1056 toep->plen_nocompl += plen; 1057 if (toep->tx_credits <= toep->tx_total * 3 / 8 && 1058 toep->tx_nocompl >= toep->tx_total / 4) { 1059 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); 1060 toep->tx_nocompl = 0; 1061 toep->plen_nocompl = 0; 1062 } 1063 1064 tp->snd_nxt += adjusted_plen; 1065 tp->snd_max += adjusted_plen; 1066 1067 toep->flags |= TPF_TX_DATA_SENT; 1068 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) 1069 toep->flags |= TPF_TX_SUSPENDED; 1070 1071 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 1072 txsd->plen = plen; 1073 txsd->tx_credits = credits; 1074 txsd++; 1075 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 1076 toep->txsd_pidx = 0; 1077 txsd = &toep->txsd[0]; 1078 } 1079 toep->txsd_avail--; 1080 1081 t4_l2t_send(sc, wr, toep->l2te); 1082 } 1083 1084 /* Send a FIN if requested, but only if there are no more PDUs to send */ 1085 if (mbufq_first(pduq) == NULL && toep->flags & TPF_SEND_FIN) 1086 t4_close_conn(sc, toep); 1087 } 1088 1089 int 1090 t4_tod_output(struct toedev *tod, struct tcpcb *tp) 1091 { 1092 struct adapter *sc = tod->tod_softc; 1093 #ifdef INVARIANTS 1094 struct inpcb *inp = tp->t_inpcb; 1095 #endif 1096 struct toepcb *toep = tp->t_toe; 1097 1098 INP_WLOCK_ASSERT(inp); 1099 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1100 ("%s: inp %p dropped.", __func__, inp)); 1101 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1102 1103 if (ulp_mode(toep) == ULP_MODE_ISCSI) 1104 t4_push_pdus(sc, toep, 0); 1105 else if (tls_tx_key(toep)) 1106 t4_push_tls_records(sc, toep, 0); 1107 else 1108 t4_push_frames(sc, toep, 0); 1109 1110 return (0); 1111 } 1112 1113 int 1114 t4_send_fin(struct toedev *tod, struct tcpcb *tp) 1115 { 1116 struct adapter *sc = tod->tod_softc; 1117 #ifdef INVARIANTS 1118 struct inpcb *inp = tp->t_inpcb; 1119 #endif 1120 struct toepcb *toep = tp->t_toe; 1121 1122 INP_WLOCK_ASSERT(inp); 1123 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1124 ("%s: inp %p dropped.", __func__, inp)); 1125 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1126 1127 toep->flags |= TPF_SEND_FIN; 1128 if (tp->t_state >= TCPS_ESTABLISHED) { 1129 if (ulp_mode(toep) == ULP_MODE_ISCSI) 1130 t4_push_pdus(sc, toep, 0); 1131 else if (tls_tx_key(toep)) 1132 t4_push_tls_records(sc, toep, 0); 1133 else 1134 t4_push_frames(sc, toep, 0); 1135 } 1136 1137 return (0); 1138 } 1139 1140 int 1141 t4_send_rst(struct toedev *tod, struct tcpcb *tp) 1142 { 1143 struct adapter *sc = tod->tod_softc; 1144 #if defined(INVARIANTS) 1145 struct inpcb *inp = tp->t_inpcb; 1146 #endif 1147 struct toepcb *toep = tp->t_toe; 1148 1149 INP_WLOCK_ASSERT(inp); 1150 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1151 ("%s: inp %p dropped.", __func__, inp)); 1152 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1153 1154 /* hmmmm */ 1155 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 1156 ("%s: flowc for tid %u [%s] not sent 
already", 1157 __func__, toep->tid, tcpstates[tp->t_state])); 1158 1159 send_reset(sc, toep, 0); 1160 return (0); 1161 } 1162 1163 /* 1164 * Peer has sent us a FIN. 1165 */ 1166 static int 1167 do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1168 { 1169 struct adapter *sc = iq->adapter; 1170 const struct cpl_peer_close *cpl = (const void *)(rss + 1); 1171 unsigned int tid = GET_TID(cpl); 1172 struct toepcb *toep = lookup_tid(sc, tid); 1173 struct inpcb *inp = toep->inp; 1174 struct tcpcb *tp = NULL; 1175 struct socket *so; 1176 struct epoch_tracker et; 1177 #ifdef INVARIANTS 1178 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1179 #endif 1180 1181 KASSERT(opcode == CPL_PEER_CLOSE, 1182 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1183 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1184 1185 if (__predict_false(toep->flags & TPF_SYNQE)) { 1186 /* 1187 * do_pass_establish must have run before do_peer_close and if 1188 * this is still a synqe instead of a toepcb then the connection 1189 * must be getting aborted. 1190 */ 1191 MPASS(toep->flags & TPF_ABORT_SHUTDOWN); 1192 CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, 1193 toep, toep->flags); 1194 return (0); 1195 } 1196 1197 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1198 1199 CURVNET_SET(toep->vnet); 1200 INP_INFO_RLOCK_ET(&V_tcbinfo, et); 1201 INP_WLOCK(inp); 1202 tp = intotcpcb(inp); 1203 1204 CTR6(KTR_CXGBE, 1205 "%s: tid %u (%s), toep_flags 0x%x, ddp_flags 0x%x, inp %p", 1206 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, 1207 toep->ddp.flags, inp); 1208 1209 if (toep->flags & TPF_ABORT_SHUTDOWN) 1210 goto done; 1211 1212 tp->rcv_nxt++; /* FIN */ 1213 1214 so = inp->inp_socket; 1215 socantrcvmore(so); 1216 if (ulp_mode(toep) == ULP_MODE_TCPDDP) { 1217 DDP_LOCK(toep); 1218 if (__predict_false(toep->ddp.flags & 1219 (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) 1220 handle_ddp_close(toep, tp, cpl->rcv_nxt); 1221 DDP_UNLOCK(toep); 1222 } 1223 1224 if (ulp_mode(toep) != ULP_MODE_RDMA) { 1225 KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt), 1226 ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt, 1227 be32toh(cpl->rcv_nxt))); 1228 } 1229 1230 switch (tp->t_state) { 1231 case TCPS_SYN_RECEIVED: 1232 tp->t_starttime = ticks; 1233 /* FALLTHROUGH */ 1234 1235 case TCPS_ESTABLISHED: 1236 tcp_state_change(tp, TCPS_CLOSE_WAIT); 1237 break; 1238 1239 case TCPS_FIN_WAIT_1: 1240 tcp_state_change(tp, TCPS_CLOSING); 1241 break; 1242 1243 case TCPS_FIN_WAIT_2: 1244 tcp_twstart(tp); 1245 INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ 1246 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 1247 CURVNET_RESTORE(); 1248 1249 INP_WLOCK(inp); 1250 final_cpl_received(toep); 1251 return (0); 1252 1253 default: 1254 log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n", 1255 __func__, tid, tp->t_state); 1256 } 1257 done: 1258 INP_WUNLOCK(inp); 1259 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 1260 CURVNET_RESTORE(); 1261 return (0); 1262 } 1263 1264 /* 1265 * Peer has ACK'd our FIN. 
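 * (The snd_nxt in the reply covers our FIN, so snd_una is set to
 * snd_nxt - 1 below to exclude it.)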
1266 */ 1267 static int 1268 do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss, 1269 struct mbuf *m) 1270 { 1271 struct adapter *sc = iq->adapter; 1272 const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1); 1273 unsigned int tid = GET_TID(cpl); 1274 struct toepcb *toep = lookup_tid(sc, tid); 1275 struct inpcb *inp = toep->inp; 1276 struct tcpcb *tp = NULL; 1277 struct socket *so = NULL; 1278 struct epoch_tracker et; 1279 #ifdef INVARIANTS 1280 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1281 #endif 1282 1283 KASSERT(opcode == CPL_CLOSE_CON_RPL, 1284 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1285 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1286 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1287 1288 CURVNET_SET(toep->vnet); 1289 INP_INFO_RLOCK_ET(&V_tcbinfo, et); 1290 INP_WLOCK(inp); 1291 tp = intotcpcb(inp); 1292 1293 CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x", 1294 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags); 1295 1296 if (toep->flags & TPF_ABORT_SHUTDOWN) 1297 goto done; 1298 1299 so = inp->inp_socket; 1300 tp->snd_una = be32toh(cpl->snd_nxt) - 1; /* exclude FIN */ 1301 1302 switch (tp->t_state) { 1303 case TCPS_CLOSING: /* see TCPS_FIN_WAIT_2 in do_peer_close too */ 1304 tcp_twstart(tp); 1305 release: 1306 INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ 1307 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 1308 CURVNET_RESTORE(); 1309 1310 INP_WLOCK(inp); 1311 final_cpl_received(toep); /* no more CPLs expected */ 1312 1313 return (0); 1314 case TCPS_LAST_ACK: 1315 if (tcp_close(tp)) 1316 INP_WUNLOCK(inp); 1317 goto release; 1318 1319 case TCPS_FIN_WAIT_1: 1320 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 1321 soisdisconnected(so); 1322 tcp_state_change(tp, TCPS_FIN_WAIT_2); 1323 break; 1324 1325 default: 1326 log(LOG_ERR, 1327 "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n", 1328 __func__, tid, tcpstates[tp->t_state]); 1329 } 1330 done: 1331 INP_WUNLOCK(inp); 1332 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 1333 CURVNET_RESTORE(); 1334 return (0); 1335 } 1336 1337 void 1338 send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid, 1339 int rst_status) 1340 { 1341 struct wrqe *wr; 1342 struct cpl_abort_rpl *cpl; 1343 1344 wr = alloc_wrqe(sizeof(*cpl), ofld_txq); 1345 if (wr == NULL) { 1346 /* XXX */ 1347 panic("%s: allocation failure.", __func__); 1348 } 1349 cpl = wrtod(wr); 1350 1351 INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid); 1352 cpl->cmd = rst_status; 1353 1354 t4_wrq_tx(sc, wr); 1355 } 1356 1357 static int 1358 abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason) 1359 { 1360 switch (abort_reason) { 1361 case CPL_ERR_BAD_SYN: 1362 case CPL_ERR_CONN_RESET: 1363 return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET); 1364 case CPL_ERR_XMIT_TIMEDOUT: 1365 case CPL_ERR_PERSIST_TIMEDOUT: 1366 case CPL_ERR_FINWAIT2_TIMEDOUT: 1367 case CPL_ERR_KEEPALIVE_TIMEDOUT: 1368 return (ETIMEDOUT); 1369 default: 1370 return (EIO); 1371 } 1372 } 1373 1374 /* 1375 * TCP RST from the peer, timeout, or some other such critical error. 
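 * (Aborts flagged as negative advice are ignored outright; for everything
 * else a CPL_ABORT_RPL is sent back to the hardware, whether the
 * connection is torn down here or by the reply to an abort we initiated
 * earlier.)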
1376 */ 1377 static int 1378 do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1379 { 1380 struct adapter *sc = iq->adapter; 1381 const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1); 1382 unsigned int tid = GET_TID(cpl); 1383 struct toepcb *toep = lookup_tid(sc, tid); 1384 struct sge_wrq *ofld_txq = toep->ofld_txq; 1385 struct inpcb *inp; 1386 struct tcpcb *tp; 1387 struct epoch_tracker et; 1388 #ifdef INVARIANTS 1389 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1390 #endif 1391 1392 KASSERT(opcode == CPL_ABORT_REQ_RSS, 1393 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1394 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1395 1396 if (toep->flags & TPF_SYNQE) 1397 return (do_abort_req_synqe(iq, rss, m)); 1398 1399 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1400 1401 if (negative_advice(cpl->status)) { 1402 CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)", 1403 __func__, cpl->status, tid, toep->flags); 1404 return (0); /* Ignore negative advice */ 1405 } 1406 1407 inp = toep->inp; 1408 CURVNET_SET(toep->vnet); 1409 INP_INFO_RLOCK_ET(&V_tcbinfo, et); /* for tcp_close */ 1410 INP_WLOCK(inp); 1411 1412 tp = intotcpcb(inp); 1413 1414 CTR6(KTR_CXGBE, 1415 "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d", 1416 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, 1417 inp->inp_flags, cpl->status); 1418 1419 /* 1420 * If we'd initiated an abort earlier the reply to it is responsible for 1421 * cleaning up resources. Otherwise we tear everything down right here 1422 * right now. We owe the T4 a CPL_ABORT_RPL no matter what. 1423 */ 1424 if (toep->flags & TPF_ABORT_SHUTDOWN) { 1425 INP_WUNLOCK(inp); 1426 goto done; 1427 } 1428 toep->flags |= TPF_ABORT_SHUTDOWN; 1429 1430 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 1431 struct socket *so = inp->inp_socket; 1432 1433 if (so != NULL) 1434 so_error_set(so, abort_status_to_errno(tp, 1435 cpl->status)); 1436 tp = tcp_close(tp); 1437 if (tp == NULL) 1438 INP_WLOCK(inp); /* re-acquire */ 1439 } 1440 1441 final_cpl_received(toep); 1442 done: 1443 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 1444 CURVNET_RESTORE(); 1445 send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST); 1446 return (0); 1447 } 1448 1449 /* 1450 * Reply to the CPL_ABORT_REQ (send_reset) 1451 */ 1452 static int 1453 do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1454 { 1455 struct adapter *sc = iq->adapter; 1456 const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1); 1457 unsigned int tid = GET_TID(cpl); 1458 struct toepcb *toep = lookup_tid(sc, tid); 1459 struct inpcb *inp = toep->inp; 1460 #ifdef INVARIANTS 1461 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1462 #endif 1463 1464 KASSERT(opcode == CPL_ABORT_RPL_RSS, 1465 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1466 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1467 1468 if (toep->flags & TPF_SYNQE) 1469 return (do_abort_rpl_synqe(iq, rss, m)); 1470 1471 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1472 1473 CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d", 1474 __func__, tid, toep, inp, cpl->status); 1475 1476 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, 1477 ("%s: wasn't expecting abort reply", __func__)); 1478 1479 INP_WLOCK(inp); 1480 final_cpl_received(toep); 1481 1482 return (0); 1483 } 1484 1485 static int 1486 do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1487 
{ 1488 struct adapter *sc = iq->adapter; 1489 const struct cpl_rx_data *cpl = mtod(m, const void *); 1490 unsigned int tid = GET_TID(cpl); 1491 struct toepcb *toep = lookup_tid(sc, tid); 1492 struct inpcb *inp = toep->inp; 1493 struct tcpcb *tp; 1494 struct socket *so; 1495 struct sockbuf *sb; 1496 struct epoch_tracker et; 1497 int len, rx_credits; 1498 uint32_t ddp_placed = 0; 1499 1500 if (__predict_false(toep->flags & TPF_SYNQE)) { 1501 /* 1502 * do_pass_establish must have run before do_rx_data and if this 1503 * is still a synqe instead of a toepcb then the connection must 1504 * be getting aborted. 1505 */ 1506 MPASS(toep->flags & TPF_ABORT_SHUTDOWN); 1507 CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, 1508 toep, toep->flags); 1509 m_freem(m); 1510 return (0); 1511 } 1512 1513 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1514 1515 /* strip off CPL header */ 1516 m_adj(m, sizeof(*cpl)); 1517 len = m->m_pkthdr.len; 1518 1519 INP_WLOCK(inp); 1520 if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) { 1521 CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x", 1522 __func__, tid, len, inp->inp_flags); 1523 INP_WUNLOCK(inp); 1524 m_freem(m); 1525 return (0); 1526 } 1527 1528 tp = intotcpcb(inp); 1529 1530 if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq))) 1531 ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt; 1532 1533 tp->rcv_nxt += len; 1534 if (tp->rcv_wnd < len) { 1535 KASSERT(ulp_mode(toep) == ULP_MODE_RDMA, 1536 ("%s: negative window size", __func__)); 1537 } 1538 1539 tp->rcv_wnd -= len; 1540 tp->t_rcvtime = ticks; 1541 1542 if (ulp_mode(toep) == ULP_MODE_TCPDDP) 1543 DDP_LOCK(toep); 1544 so = inp_inpcbtosocket(inp); 1545 sb = &so->so_rcv; 1546 SOCKBUF_LOCK(sb); 1547 1548 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) { 1549 CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)", 1550 __func__, tid, len); 1551 m_freem(m); 1552 SOCKBUF_UNLOCK(sb); 1553 if (ulp_mode(toep) == ULP_MODE_TCPDDP) 1554 DDP_UNLOCK(toep); 1555 INP_WUNLOCK(inp); 1556 1557 CURVNET_SET(toep->vnet); 1558 INP_INFO_RLOCK_ET(&V_tcbinfo, et); 1559 INP_WLOCK(inp); 1560 tp = tcp_drop(tp, ECONNRESET); 1561 if (tp) 1562 INP_WUNLOCK(inp); 1563 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 1564 CURVNET_RESTORE(); 1565 1566 return (0); 1567 } 1568 1569 /* receive buffer autosize */ 1570 MPASS(toep->vnet == so->so_vnet); 1571 CURVNET_SET(toep->vnet); 1572 if (sb->sb_flags & SB_AUTOSIZE && 1573 V_tcp_do_autorcvbuf && 1574 sb->sb_hiwat < V_tcp_autorcvbuf_max && 1575 len > (sbspace(sb) / 8 * 7)) { 1576 unsigned int hiwat = sb->sb_hiwat; 1577 unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc, 1578 V_tcp_autorcvbuf_max); 1579 1580 if (!sbreserve_locked(sb, newsize, so, NULL)) 1581 sb->sb_flags &= ~SB_AUTOSIZE; 1582 } 1583 1584 if (ulp_mode(toep) == ULP_MODE_TCPDDP) { 1585 int changed = !(toep->ddp.flags & DDP_ON) ^ cpl->ddp_off; 1586 1587 if (toep->ddp.waiting_count != 0 || toep->ddp.active_count != 0) 1588 CTR3(KTR_CXGBE, "%s: tid %u, non-ddp rx (%d bytes)", 1589 __func__, tid, len); 1590 1591 if (changed) { 1592 if (toep->ddp.flags & DDP_SC_REQ) 1593 toep->ddp.flags ^= DDP_ON | DDP_SC_REQ; 1594 else { 1595 KASSERT(cpl->ddp_off == 1, 1596 ("%s: DDP switched on by itself.", 1597 __func__)); 1598 1599 /* Fell out of DDP mode */ 1600 toep->ddp.flags &= ~DDP_ON; 1601 CTR1(KTR_CXGBE, "%s: fell out of DDP mode", 1602 __func__); 1603 1604 insert_ddp_data(toep, ddp_placed); 1605 } 1606 } 1607 1608 if (toep->ddp.flags & DDP_ON) { 1609 /* 1610 * CPL_RX_DATA with DDP on can only be an indicate. 
1611 * Start posting queued AIO requests via DDP. The 1612 * payload that arrived in this indicate is appended 1613 * to the socket buffer as usual. 1614 */ 1615 handle_ddp_indicate(toep); 1616 } 1617 } 1618 1619 sbappendstream_locked(sb, m, 0); 1620 rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0; 1621 if (rx_credits > 0 && sbused(sb) + tp->rcv_wnd < sb->sb_lowat) { 1622 rx_credits = send_rx_credits(sc, toep, rx_credits); 1623 tp->rcv_wnd += rx_credits; 1624 tp->rcv_adv += rx_credits; 1625 } 1626 1627 if (ulp_mode(toep) == ULP_MODE_TCPDDP && toep->ddp.waiting_count > 0 && 1628 sbavail(sb) != 0) { 1629 CTR2(KTR_CXGBE, "%s: tid %u queueing AIO task", __func__, 1630 tid); 1631 ddp_queue_toep(toep); 1632 } 1633 sorwakeup_locked(so); 1634 SOCKBUF_UNLOCK_ASSERT(sb); 1635 if (ulp_mode(toep) == ULP_MODE_TCPDDP) 1636 DDP_UNLOCK(toep); 1637 1638 INP_WUNLOCK(inp); 1639 CURVNET_RESTORE(); 1640 return (0); 1641 } 1642 1643 static int 1644 do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1645 { 1646 struct adapter *sc = iq->adapter; 1647 const struct cpl_fw4_ack *cpl = (const void *)(rss + 1); 1648 unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl))); 1649 struct toepcb *toep = lookup_tid(sc, tid); 1650 struct inpcb *inp; 1651 struct tcpcb *tp; 1652 struct socket *so; 1653 uint8_t credits = cpl->credits; 1654 struct ofld_tx_sdesc *txsd; 1655 int plen; 1656 #ifdef INVARIANTS 1657 unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl))); 1658 #endif 1659 1660 /* 1661 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and 1662 * now this comes back carrying the credits for the flowc. 1663 */ 1664 if (__predict_false(toep->flags & TPF_SYNQE)) { 1665 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, 1666 ("%s: credits for a synq entry %p", __func__, toep)); 1667 return (0); 1668 } 1669 1670 inp = toep->inp; 1671 1672 KASSERT(opcode == CPL_FW4_ACK, 1673 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1674 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1675 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1676 1677 INP_WLOCK(inp); 1678 1679 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) { 1680 INP_WUNLOCK(inp); 1681 return (0); 1682 } 1683 1684 KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0, 1685 ("%s: inp_flags 0x%x", __func__, inp->inp_flags)); 1686 1687 tp = intotcpcb(inp); 1688 1689 if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) { 1690 tcp_seq snd_una = be32toh(cpl->snd_una); 1691 1692 #ifdef INVARIANTS 1693 if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) { 1694 log(LOG_ERR, 1695 "%s: unexpected seq# %x for TID %u, snd_una %x\n", 1696 __func__, snd_una, toep->tid, tp->snd_una); 1697 } 1698 #endif 1699 1700 if (tp->snd_una != snd_una) { 1701 tp->snd_una = snd_una; 1702 tp->ts_recent_age = tcp_ts_getticks(); 1703 } 1704 } 1705 1706 #ifdef VERBOSE_TRACES 1707 CTR3(KTR_CXGBE, "%s: tid %d credits %u", __func__, tid, credits); 1708 #endif 1709 so = inp->inp_socket; 1710 txsd = &toep->txsd[toep->txsd_cidx]; 1711 plen = 0; 1712 while (credits) { 1713 KASSERT(credits >= txsd->tx_credits, 1714 ("%s: too many (or partial) credits", __func__)); 1715 credits -= txsd->tx_credits; 1716 toep->tx_credits += txsd->tx_credits; 1717 plen += txsd->plen; 1718 if (txsd->iv_buffer) { 1719 free(txsd->iv_buffer, M_CXGBE); 1720 txsd->iv_buffer = NULL; 1721 } 1722 txsd++; 1723 toep->txsd_avail++; 1724 KASSERT(toep->txsd_avail <= toep->txsd_total, 1725 ("%s: txsd avail > total", __func__)); 1726 
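		/* Wrap the consumer index at the end of the tx descriptor ring. */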
		if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) {
			txsd = &toep->txsd[0];
			toep->txsd_cidx = 0;
		}
	}

	if (toep->tx_credits == toep->tx_total) {
		toep->tx_nocompl = 0;
		toep->plen_nocompl = 0;
	}

	if (toep->flags & TPF_TX_SUSPENDED &&
	    toep->tx_credits >= toep->tx_total / 4) {
#ifdef VERBOSE_TRACES
		CTR2(KTR_CXGBE, "%s: tid %d calling t4_push_frames", __func__,
		    tid);
#endif
		toep->flags &= ~TPF_TX_SUSPENDED;
		CURVNET_SET(toep->vnet);
		if (ulp_mode(toep) == ULP_MODE_ISCSI)
			t4_push_pdus(sc, toep, plen);
		else if (tls_tx_key(toep))
			t4_push_tls_records(sc, toep, plen);
		else
			t4_push_frames(sc, toep, plen);
		CURVNET_RESTORE();
	} else if (plen > 0) {
		struct sockbuf *sb = &so->so_snd;
		int sbu;

		SOCKBUF_LOCK(sb);
		sbu = sbused(sb);
		if (ulp_mode(toep) == ULP_MODE_ISCSI) {

			if (__predict_false(sbu > 0)) {
				/*
				 * The data transmitted before the tid's ULP mode
				 * changed to ISCSI is still in so_snd.
				 * Incoming credits should account for so_snd
				 * first.
				 */
				sbdrop_locked(sb, min(sbu, plen));
				plen -= min(sbu, plen);
			}
			sowwakeup_locked(so);	/* unlocks so_snd */
			rqdrop_locked(&toep->ulp_pdu_reclaimq, plen);
		} else {
#ifdef VERBOSE_TRACES
			CTR3(KTR_CXGBE, "%s: tid %d dropped %d bytes", __func__,
			    tid, plen);
#endif
			sbdrop_locked(sb, plen);
			if (tls_tx_key(toep)) {
				struct tls_ofld_info *tls_ofld = &toep->tls;

				MPASS(tls_ofld->sb_off >= plen);
				tls_ofld->sb_off -= plen;
			}
			if (!TAILQ_EMPTY(&toep->aiotx_jobq))
				t4_aiotx_queue_toep(so, toep);
			sowwakeup_locked(so);	/* unlocks so_snd */
		}
		SOCKBUF_UNLOCK_ASSERT(sb);
	}

	INP_WUNLOCK(inp);

	return (0);
}

void
t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, struct toepcb *toep,
    uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie)
{
	struct wrqe *wr;
	struct cpl_set_tcb_field *req;
	struct ofld_tx_sdesc *txsd;

	MPASS((cookie & ~M_COOKIE) == 0);
	if (reply) {
		MPASS(cookie != CPL_COOKIE_RESERVED);
	}

	wr = alloc_wrqe(sizeof(*req), wrq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
	req->reply_ctrl = htobe16(V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	if (reply == 0)
		req->reply_ctrl |= htobe16(F_NO_REPLY);
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie));
	req->mask = htobe64(mask);
	req->val = htobe64(val);
	if ((wrq->eq.flags & EQ_TYPEMASK) == EQ_OFLD) {
		txsd = &toep->txsd[toep->txsd_pidx];
		txsd->tx_credits = howmany(sizeof(*req), 16);
		txsd->plen = 0;
		KASSERT(toep->tx_credits >= txsd->tx_credits &&
		    toep->txsd_avail > 0,
		    ("%s: not enough credits (%d)", __func__,
		    toep->tx_credits));
		toep->tx_credits -= txsd->tx_credits;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
			toep->txsd_pidx = 0;
		toep->txsd_avail--;
	}

	t4_wrq_tx(sc, wr);
}

void
t4_init_cpl_io_handlers(void)
{

	t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close);
	t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl);
	t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req);
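	/*
	 * CPL_ABORT_RPL_RSS and CPL_FW4_ACK go through the shared-handler
	 * interface; the CPL_COOKIE_TOM cookie is what steers those replies
	 * to the handlers registered here rather than to another ULD's.
	 */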
t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl, 1848 CPL_COOKIE_TOM); 1849 t4_register_cpl_handler(CPL_RX_DATA, do_rx_data); 1850 t4_register_shared_cpl_handler(CPL_FW4_ACK, do_fw4_ack, CPL_COOKIE_TOM); 1851 } 1852 1853 void 1854 t4_uninit_cpl_io_handlers(void) 1855 { 1856 1857 t4_register_cpl_handler(CPL_PEER_CLOSE, NULL); 1858 t4_register_cpl_handler(CPL_CLOSE_CON_RPL, NULL); 1859 t4_register_cpl_handler(CPL_ABORT_REQ_RSS, NULL); 1860 t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS, NULL, CPL_COOKIE_TOM); 1861 t4_register_cpl_handler(CPL_RX_DATA, NULL); 1862 t4_register_shared_cpl_handler(CPL_FW4_ACK, NULL, CPL_COOKIE_TOM); 1863 } 1864 1865 /* 1866 * Use the 'backend1' field in AIO jobs to hold an error that should 1867 * be reported when the job is completed, the 'backend3' field to 1868 * store the amount of data sent by the AIO job so far, and the 1869 * 'backend4' field to hold a reference count on the job. 1870 * 1871 * Each unmapped mbuf holds a reference on the job as does the queue 1872 * so long as the job is queued. 1873 */ 1874 #define aio_error backend1 1875 #define aio_sent backend3 1876 #define aio_refs backend4 1877 1878 #define jobtotid(job) \ 1879 (((struct toepcb *)(so_sototcpcb((job)->fd_file->f_data)->t_toe))->tid) 1880 1881 static void 1882 aiotx_free_job(struct kaiocb *job) 1883 { 1884 long status; 1885 int error; 1886 1887 if (refcount_release(&job->aio_refs) == 0) 1888 return; 1889 1890 error = (intptr_t)job->aio_error; 1891 status = job->aio_sent; 1892 #ifdef VERBOSE_TRACES 1893 CTR5(KTR_CXGBE, "%s: tid %d completed %p len %ld, error %d", __func__, 1894 jobtotid(job), job, status, error); 1895 #endif 1896 if (error != 0 && status != 0) 1897 error = 0; 1898 if (error == ECANCELED) 1899 aio_cancel(job); 1900 else if (error) 1901 aio_complete(job, -1, error); 1902 else { 1903 job->msgsnd = 1; 1904 aio_complete(job, status, 0); 1905 } 1906 } 1907 1908 static void 1909 aiotx_free_pgs(struct mbuf *m) 1910 { 1911 struct mbuf_ext_pgs *ext_pgs; 1912 struct kaiocb *job; 1913 struct mtx *mtx; 1914 vm_page_t pg; 1915 1916 MBUF_EXT_PGS_ASSERT(m); 1917 ext_pgs = m->m_ext.ext_pgs; 1918 job = m->m_ext.ext_arg1; 1919 #ifdef VERBOSE_TRACES 1920 CTR3(KTR_CXGBE, "%s: completed %d bytes for tid %d", __func__, 1921 m->m_len, jobtotid(job)); 1922 #endif 1923 1924 mtx = NULL; 1925 for (int i = 0; i < ext_pgs->npgs; i++) { 1926 pg = PHYS_TO_VM_PAGE(ext_pgs->pa[i]); 1927 vm_page_change_lock(pg, &mtx); 1928 vm_page_unwire(pg, PQ_ACTIVE); 1929 } 1930 if (mtx != NULL) 1931 mtx_unlock(mtx); 1932 1933 aiotx_free_job(job); 1934 } 1935 1936 /* 1937 * Allocate a chain of unmapped mbufs describing the next 'len' bytes 1938 * of an AIO job. 1939 */ 1940 static struct mbuf * 1941 alloc_aiotx_mbuf(struct kaiocb *job, int len) 1942 { 1943 struct vmspace *vm; 1944 vm_page_t pgs[MBUF_PEXT_MAX_PGS]; 1945 struct mbuf *m, *top, *last; 1946 struct mbuf_ext_pgs *ext_pgs; 1947 vm_map_t map; 1948 vm_offset_t start; 1949 int i, mlen, npages, pgoff; 1950 1951 KASSERT(job->aio_sent + len <= job->uaiocb.aio_nbytes, 1952 ("%s(%p, %d): request to send beyond end of buffer", __func__, 1953 job, len)); 1954 1955 /* 1956 * The AIO subsystem will cancel and drain all requests before 1957 * permitting a process to exit or exec, so p_vmspace should 1958 * be stable here. 
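 *
 * Each loop iteration below pins at most MBUF_PEXT_MAX_PGS pages, so a
 * large request is carved into a chain of unmapped mbufs whose internal
 * boundaries (every one after the first) fall on page boundaries.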
	 */
	vm = job->userproc->p_vmspace;
	map = &vm->vm_map;
	start = (uintptr_t)job->uaiocb.aio_buf + job->aio_sent;
	pgoff = start & PAGE_MASK;

	top = NULL;
	last = NULL;
	while (len > 0) {
		mlen = imin(len, MBUF_PEXT_MAX_PGS * PAGE_SIZE - pgoff);
		KASSERT(mlen == len || ((start + mlen) & PAGE_MASK) == 0,
		    ("%s: next start (%#jx + %#x) is not page aligned",
		    __func__, (uintmax_t)start, mlen));

		npages = vm_fault_quick_hold_pages(map, start, mlen,
		    VM_PROT_WRITE, pgs, nitems(pgs));
		if (npages < 0)
			break;

		m = mb_alloc_ext_pgs(M_WAITOK, false, aiotx_free_pgs);
		if (m == NULL) {
			vm_page_unhold_pages(pgs, npages);
			break;
		}

		ext_pgs = m->m_ext.ext_pgs;
		ext_pgs->first_pg_off = pgoff;
		ext_pgs->npgs = npages;
		if (npages == 1) {
			KASSERT(mlen + pgoff <= PAGE_SIZE,
			    ("%s: single page is too large (off %d len %d)",
			    __func__, pgoff, mlen));
			ext_pgs->last_pg_len = mlen;
		} else {
			ext_pgs->last_pg_len = mlen - (PAGE_SIZE - pgoff) -
			    (npages - 2) * PAGE_SIZE;
		}
		for (i = 0; i < npages; i++)
			ext_pgs->pa[i] = VM_PAGE_TO_PHYS(pgs[i]);

		m->m_len = mlen;
		m->m_ext.ext_size = npages * PAGE_SIZE;
		m->m_ext.ext_arg1 = job;
		refcount_acquire(&job->aio_refs);

#ifdef VERBOSE_TRACES
		CTR5(KTR_CXGBE, "%s: tid %d, new mbuf %p for job %p, npages %d",
		    __func__, jobtotid(job), m, job, npages);
#endif

		if (top == NULL)
			top = m;
		else
			last->m_next = m;
		last = m;

		len -= mlen;
		start += mlen;
		pgoff = 0;
	}

	return (top);
}

static void
t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job)
{
	struct sockbuf *sb;
	struct file *fp;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct mbuf *m;
	int error, len;
	bool moretocome, sendmore;

	sb = &so->so_snd;
	SOCKBUF_UNLOCK(sb);
	fp = job->fd_file;
	m = NULL;

#ifdef MAC
	error = mac_socket_check_send(fp->f_cred, so);
	if (error != 0)
		goto out;
#endif

	/* Inline sosend_generic(). */

	error = sblock(sb, SBL_WAIT);
	MPASS(error == 0);

sendanother:
	SOCKBUF_LOCK(sb);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		if ((so->so_options & SO_NOSIGPIPE) == 0) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		error = ENOTCONN;
		goto out;
	}
	if (sbspace(sb) < sb->sb_lowat) {
		MPASS(job->aio_sent == 0 || !(so->so_state & SS_NBIO));

		/*
		 * Don't block if there is too little room in the socket
		 * buffer.  Instead, requeue the request.
		 */
		if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
			SOCKBUF_UNLOCK(sb);
			sbunlock(sb);
			error = ECANCELED;
			goto out;
		}
		TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		goto out;
	}

	/*
	 * Write as much data as the socket permits, but no more than
	 * a single sndbuf at a time.
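	 *
	 * 'moretocome' is true when more of this job (or another queued
	 * job) remains to be sent; it keeps TF_MORETOCOME set around the
	 * tcp_output() call below.  'sendmore' is true when the chunk had
	 * to be clipped to one sndbuf, in which case we loop back to
	 * sendanother to push the next chunk.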
	 */
	len = sbspace(sb);
	if (len > job->uaiocb.aio_nbytes - job->aio_sent) {
		len = job->uaiocb.aio_nbytes - job->aio_sent;
		moretocome = false;
	} else
		moretocome = true;
	if (len > toep->params.sndbuf) {
		len = toep->params.sndbuf;
		sendmore = true;
	} else
		sendmore = false;

	if (!TAILQ_EMPTY(&toep->aiotx_jobq))
		moretocome = true;
	SOCKBUF_UNLOCK(sb);
	MPASS(len != 0);

	m = alloc_aiotx_mbuf(job, len);
	if (m == NULL) {
		sbunlock(sb);
		error = EFAULT;
		goto out;
	}

	/* Inlined tcp_usr_send(). */

	inp = toep->inp;
	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		sbunlock(sb);
		error = ECONNRESET;
		goto out;
	}

	job->aio_sent += m_length(m, NULL);

	sbappendstream(sb, m, 0);
	m = NULL;

	if (!(inp->inp_flags & INP_DROPPED)) {
		tp = intotcpcb(inp);
		if (moretocome)
			tp->t_flags |= TF_MORETOCOME;
		error = tp->t_fb->tfb_tcp_output(tp);
		if (moretocome)
			tp->t_flags &= ~TF_MORETOCOME;
	}

	INP_WUNLOCK(inp);
	if (sendmore)
		goto sendanother;
	sbunlock(sb);

	if (error)
		goto out;

	/*
	 * If this is a blocking socket and the request has not been
	 * fully completed, requeue it until the socket is ready
	 * again.
	 */
	if (job->aio_sent < job->uaiocb.aio_nbytes &&
	    !(so->so_state & SS_NBIO)) {
		SOCKBUF_LOCK(sb);
		if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
			SOCKBUF_UNLOCK(sb);
			error = ECANCELED;
			goto out;
		}
		TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
		return;
	}

	/*
	 * If the request will not be requeued, drop the queue's
	 * reference to the job.  Any mbufs in flight should still
	 * hold a reference, but this drops the reference that the
	 * queue owns while it is waiting to queue mbufs to the
	 * socket.
	 */
	aiotx_free_job(job);

out:
	if (error) {
		job->aio_error = (void *)(intptr_t)error;
		aiotx_free_job(job);
	}
	if (m != NULL)
		m_free(m);
	SOCKBUF_LOCK(sb);
}

static void
t4_aiotx_task(void *context, int pending)
{
	struct toepcb *toep = context;
	struct socket *so;
	struct kaiocb *job;

	so = toep->aiotx_so;
	CURVNET_SET(toep->vnet);
	SOCKBUF_LOCK(&so->so_snd);
	while (!TAILQ_EMPTY(&toep->aiotx_jobq) && sowriteable(so)) {
		job = TAILQ_FIRST(&toep->aiotx_jobq);
		TAILQ_REMOVE(&toep->aiotx_jobq, job, list);
		if (!aio_clear_cancel_function(job))
			continue;

		t4_aiotx_process_job(toep, so, job);
	}
	toep->aiotx_so = NULL;
	SOCKBUF_UNLOCK(&so->so_snd);
	CURVNET_RESTORE();

	free_toepcb(toep);
	SOCK_LOCK(so);
	sorele(so);
}

static void
t4_aiotx_queue_toep(struct socket *so, struct toepcb *toep)
{

	SOCKBUF_LOCK_ASSERT(&toep->inp->inp_socket->so_snd);
#ifdef VERBOSE_TRACES
	CTR3(KTR_CXGBE, "%s: queueing aiotx task for tid %d, active = %s",
	    __func__, toep->tid, toep->aiotx_so != NULL ?
"true" : "false"); 2227 #endif 2228 if (toep->aiotx_so != NULL) 2229 return; 2230 soref(so); 2231 toep->aiotx_so = so; 2232 hold_toepcb(toep); 2233 soaio_enqueue(&toep->aiotx_task); 2234 } 2235 2236 static void 2237 t4_aiotx_cancel(struct kaiocb *job) 2238 { 2239 struct socket *so; 2240 struct sockbuf *sb; 2241 struct tcpcb *tp; 2242 struct toepcb *toep; 2243 2244 so = job->fd_file->f_data; 2245 tp = so_sototcpcb(so); 2246 toep = tp->t_toe; 2247 MPASS(job->uaiocb.aio_lio_opcode == LIO_WRITE); 2248 sb = &so->so_snd; 2249 2250 SOCKBUF_LOCK(sb); 2251 if (!aio_cancel_cleared(job)) 2252 TAILQ_REMOVE(&toep->aiotx_jobq, job, list); 2253 SOCKBUF_UNLOCK(sb); 2254 2255 job->aio_error = (void *)(intptr_t)ECANCELED; 2256 aiotx_free_job(job); 2257 } 2258 2259 int 2260 t4_aio_queue_aiotx(struct socket *so, struct kaiocb *job) 2261 { 2262 struct tcpcb *tp = so_sototcpcb(so); 2263 struct toepcb *toep = tp->t_toe; 2264 struct adapter *sc = td_adapter(toep->td); 2265 2266 /* This only handles writes. */ 2267 if (job->uaiocb.aio_lio_opcode != LIO_WRITE) 2268 return (EOPNOTSUPP); 2269 2270 if (!sc->tt.tx_zcopy) 2271 return (EOPNOTSUPP); 2272 2273 if (tls_tx_key(toep)) 2274 return (EOPNOTSUPP); 2275 2276 SOCKBUF_LOCK(&so->so_snd); 2277 #ifdef VERBOSE_TRACES 2278 CTR3(KTR_CXGBE, "%s: queueing %p for tid %u", __func__, job, toep->tid); 2279 #endif 2280 if (!aio_set_cancel_function(job, t4_aiotx_cancel)) 2281 panic("new job was cancelled"); 2282 refcount_init(&job->aio_refs, 1); 2283 TAILQ_INSERT_TAIL(&toep->aiotx_jobq, job, list); 2284 if (sowriteable(so)) 2285 t4_aiotx_queue_toep(so, toep); 2286 SOCKBUF_UNLOCK(&so->so_snd); 2287 return (0); 2288 } 2289 2290 void 2291 aiotx_init_toep(struct toepcb *toep) 2292 { 2293 2294 TAILQ_INIT(&toep->aiotx_jobq); 2295 TASK_INIT(&toep->aiotx_task, 0, t4_aiotx_task, toep); 2296 } 2297 #endif 2298