1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2012, 2015 Chelsio Communications, Inc. 5 * All rights reserved. 6 * Written by: Navdeep Parhar <np@FreeBSD.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_inet.h" 34 #include "opt_inet6.h" 35 #include "opt_ratelimit.h" 36 37 #ifdef TCP_OFFLOAD 38 #include <sys/param.h> 39 #include <sys/aio.h> 40 #include <sys/file.h> 41 #include <sys/kernel.h> 42 #include <sys/ktr.h> 43 #include <sys/module.h> 44 #include <sys/proc.h> 45 #include <sys/protosw.h> 46 #include <sys/domain.h> 47 #include <sys/socket.h> 48 #include <sys/socketvar.h> 49 #include <sys/sglist.h> 50 #include <sys/taskqueue.h> 51 #include <netinet/in.h> 52 #include <netinet/in_pcb.h> 53 #include <netinet/ip.h> 54 #include <netinet/ip6.h> 55 #define TCPSTATES 56 #include <netinet/tcp_fsm.h> 57 #include <netinet/tcp_seq.h> 58 #include <netinet/tcp_var.h> 59 #include <netinet/toecore.h> 60 61 #include <security/mac/mac_framework.h> 62 63 #include <vm/vm.h> 64 #include <vm/vm_extern.h> 65 #include <vm/pmap.h> 66 #include <vm/vm_map.h> 67 #include <vm/vm_page.h> 68 69 #include "common/common.h" 70 #include "common/t4_msg.h" 71 #include "common/t4_regs.h" 72 #include "common/t4_tcb.h" 73 #include "tom/t4_tom_l2t.h" 74 #include "tom/t4_tom.h" 75 76 static void t4_aiotx_cancel(struct kaiocb *job); 77 static void t4_aiotx_queue_toep(struct toepcb *toep); 78 79 static size_t 80 aiotx_mbuf_pgoff(struct mbuf *m) 81 { 82 struct aiotx_buffer *ab; 83 84 MPASS(IS_AIOTX_MBUF(m)); 85 ab = m->m_ext.ext_arg1; 86 return ((ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) % PAGE_SIZE); 87 } 88 89 static vm_page_t * 90 aiotx_mbuf_pages(struct mbuf *m) 91 { 92 struct aiotx_buffer *ab; 93 int npages; 94 95 MPASS(IS_AIOTX_MBUF(m)); 96 ab = m->m_ext.ext_arg1; 97 npages = (ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) / PAGE_SIZE; 98 return (ab->ps.pages + npages); 99 } 100 101 void 102 send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp) 103 { 104 struct wrqe *wr; 105 struct fw_flowc_wr *flowc; 106 unsigned int nparams, flowclen, paramidx; 107 struct vi_info *vi = toep->vi; 108 struct port_info *pi = vi->pi; 109 struct adapter *sc = pi->adapter; 
110 unsigned int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN; 111 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 112 113 KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT), 114 ("%s: flowc for tid %u sent already", __func__, toep->tid)); 115 116 if (ftxp != NULL) 117 nparams = 8; 118 else 119 nparams = 6; 120 if (toep->ulp_mode == ULP_MODE_TLS) 121 nparams++; 122 if (toep->tls.fcplenmax != 0) 123 nparams++; 124 if (toep->tc_idx != -1) { 125 MPASS(toep->tc_idx >= 0 && 126 toep->tc_idx < sc->chip_params->nsched_cls); 127 nparams++; 128 } 129 130 flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval); 131 132 wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq); 133 if (wr == NULL) { 134 /* XXX */ 135 panic("%s: allocation failure.", __func__); 136 } 137 flowc = wrtod(wr); 138 memset(flowc, 0, wr->wr_len); 139 140 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 141 V_FW_FLOWC_WR_NPARAMS(nparams)); 142 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) | 143 V_FW_WR_FLOWID(toep->tid)); 144 145 #define FLOWC_PARAM(__m, __v) \ 146 do { \ 147 flowc->mnemval[paramidx].mnemonic = FW_FLOWC_MNEM_##__m; \ 148 flowc->mnemval[paramidx].val = htobe32(__v); \ 149 paramidx++; \ 150 } while (0) 151 152 paramidx = 0; 153 154 FLOWC_PARAM(PFNVFN, pfvf); 155 FLOWC_PARAM(CH, pi->tx_chan); 156 FLOWC_PARAM(PORT, pi->tx_chan); 157 FLOWC_PARAM(IQID, toep->ofld_rxq->iq.abs_id); 158 if (ftxp) { 159 uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf); 160 161 FLOWC_PARAM(SNDNXT, ftxp->snd_nxt); 162 FLOWC_PARAM(RCVNXT, ftxp->rcv_nxt); 163 FLOWC_PARAM(SNDBUF, sndbuf); 164 FLOWC_PARAM(MSS, ftxp->mss); 165 166 CTR6(KTR_CXGBE, 167 "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x", 168 __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt, 169 ftxp->rcv_nxt); 170 } else { 171 FLOWC_PARAM(SNDBUF, 512); 172 FLOWC_PARAM(MSS, 512); 173 174 CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid); 175 } 176 if (toep->ulp_mode == ULP_MODE_TLS) 177 FLOWC_PARAM(ULP_MODE, toep->ulp_mode); 178 if (toep->tls.fcplenmax != 0) 179 FLOWC_PARAM(TXDATAPLEN_MAX, toep->tls.fcplenmax); 180 if (toep->tc_idx != -1) 181 FLOWC_PARAM(SCHEDCLASS, toep->tc_idx); 182 #undef FLOWC_PARAM 183 184 KASSERT(paramidx == nparams, ("nparams mismatch")); 185 186 txsd->tx_credits = howmany(flowclen, 16); 187 txsd->plen = 0; 188 KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0, 189 ("%s: not enough credits (%d)", __func__, toep->tx_credits)); 190 toep->tx_credits -= txsd->tx_credits; 191 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) 192 toep->txsd_pidx = 0; 193 toep->txsd_avail--; 194 195 toep->flags |= TPF_FLOWC_WR_SENT; 196 t4_wrq_tx(sc, wr); 197 } 198 199 #ifdef RATELIMIT 200 /* 201 * Input is Bytes/second (so_max_pacing-rate), chip counts in Kilobits/second. 
202 */ 203 static int 204 update_tx_rate_limit(struct adapter *sc, struct toepcb *toep, u_int Bps) 205 { 206 int tc_idx, rc; 207 const u_int kbps = (u_int) (uint64_t)Bps * 8ULL / 1000; 208 const int port_id = toep->vi->pi->port_id; 209 210 CTR3(KTR_CXGBE, "%s: tid %u, rate %uKbps", __func__, toep->tid, kbps); 211 212 if (kbps == 0) { 213 /* unbind */ 214 tc_idx = -1; 215 } else { 216 rc = t4_reserve_cl_rl_kbps(sc, port_id, kbps, &tc_idx); 217 if (rc != 0) 218 return (rc); 219 MPASS(tc_idx >= 0 && tc_idx < sc->chip_params->nsched_cls); 220 } 221 222 if (toep->tc_idx != tc_idx) { 223 struct wrqe *wr; 224 struct fw_flowc_wr *flowc; 225 int nparams = 1, flowclen, flowclen16; 226 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 227 228 flowclen = sizeof(*flowc) + nparams * sizeof(struct 229 fw_flowc_mnemval); 230 flowclen16 = howmany(flowclen, 16); 231 if (toep->tx_credits < flowclen16 || toep->txsd_avail == 0 || 232 (wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq)) == NULL) { 233 if (tc_idx >= 0) 234 t4_release_cl_rl_kbps(sc, port_id, tc_idx); 235 return (ENOMEM); 236 } 237 238 flowc = wrtod(wr); 239 memset(flowc, 0, wr->wr_len); 240 241 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 242 V_FW_FLOWC_WR_NPARAMS(nparams)); 243 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(flowclen16) | 244 V_FW_WR_FLOWID(toep->tid)); 245 246 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; 247 if (tc_idx == -1) 248 flowc->mnemval[0].val = htobe32(0xff); 249 else 250 flowc->mnemval[0].val = htobe32(tc_idx); 251 252 txsd->tx_credits = flowclen16; 253 txsd->plen = 0; 254 toep->tx_credits -= txsd->tx_credits; 255 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) 256 toep->txsd_pidx = 0; 257 toep->txsd_avail--; 258 t4_wrq_tx(sc, wr); 259 } 260 261 if (toep->tc_idx >= 0) 262 t4_release_cl_rl_kbps(sc, port_id, toep->tc_idx); 263 toep->tc_idx = tc_idx; 264 265 return (0); 266 } 267 #endif 268 269 void 270 send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt) 271 { 272 struct wrqe *wr; 273 struct cpl_abort_req *req; 274 int tid = toep->tid; 275 struct inpcb *inp = toep->inp; 276 struct tcpcb *tp = intotcpcb(inp); /* don't use if INP_DROPPED */ 277 278 INP_WLOCK_ASSERT(inp); 279 280 CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s", 281 __func__, toep->tid, 282 inp->inp_flags & INP_DROPPED ? "inp dropped" : 283 tcpstates[tp->t_state], 284 toep->flags, inp->inp_flags, 285 toep->flags & TPF_ABORT_SHUTDOWN ? 286 " (abort already in progress)" : ""); 287 288 if (toep->flags & TPF_ABORT_SHUTDOWN) 289 return; /* abort already in progress */ 290 291 toep->flags |= TPF_ABORT_SHUTDOWN; 292 293 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 294 ("%s: flowc_wr not sent for tid %d.", __func__, tid)); 295 296 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 297 if (wr == NULL) { 298 /* XXX */ 299 panic("%s: allocation failure.", __func__); 300 } 301 req = wrtod(wr); 302 303 INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid); 304 if (inp->inp_flags & INP_DROPPED) 305 req->rsvd0 = htobe32(snd_nxt); 306 else 307 req->rsvd0 = htobe32(tp->snd_nxt); 308 req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT); 309 req->cmd = CPL_ABORT_SEND_RST; 310 311 /* 312 * XXX: What's the correct way to tell that the inp hasn't been detached 313 * from its socket? Should I even be flushing the snd buffer here? 314 */ 315 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 316 struct socket *so = inp->inp_socket; 317 318 if (so != NULL) /* because I'm not sure. 
See comment above */ 319 sbflush(&so->so_snd); 320 } 321 322 t4_l2t_send(sc, wr, toep->l2te); 323 } 324 325 /* 326 * Called when a connection is established to translate the TCP options 327 * reported by HW to FreeBSD's native format. 328 */ 329 static void 330 assign_rxopt(struct tcpcb *tp, unsigned int opt) 331 { 332 struct toepcb *toep = tp->t_toe; 333 struct inpcb *inp = tp->t_inpcb; 334 struct adapter *sc = td_adapter(toep->td); 335 int n; 336 337 INP_LOCK_ASSERT(inp); 338 339 if (inp->inp_inc.inc_flags & INC_ISIPV6) 340 n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 341 else 342 n = sizeof(struct ip) + sizeof(struct tcphdr); 343 tp->t_maxseg = sc->params.mtus[G_TCPOPT_MSS(opt)] - n; 344 345 if (G_TCPOPT_TSTAMP(opt)) { 346 tp->t_flags |= TF_RCVD_TSTMP; /* timestamps ok */ 347 tp->ts_recent = 0; /* hmmm */ 348 tp->ts_recent_age = tcp_ts_getticks(); 349 tp->t_maxseg -= TCPOLEN_TSTAMP_APPA; 350 } 351 352 CTR5(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u), mss %u", __func__, 353 toep->tid, G_TCPOPT_MSS(opt), sc->params.mtus[G_TCPOPT_MSS(opt)], 354 tp->t_maxseg); 355 356 if (G_TCPOPT_SACK(opt)) 357 tp->t_flags |= TF_SACK_PERMIT; /* should already be set */ 358 else 359 tp->t_flags &= ~TF_SACK_PERMIT; /* sack disallowed by peer */ 360 361 if (G_TCPOPT_WSCALE_OK(opt)) 362 tp->t_flags |= TF_RCVD_SCALE; 363 364 /* Doing window scaling? */ 365 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 366 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 367 tp->rcv_scale = tp->request_r_scale; 368 tp->snd_scale = G_TCPOPT_SND_WSCALE(opt); 369 } 370 } 371 372 /* 373 * Completes some final bits of initialization for just established connections 374 * and changes their state to TCPS_ESTABLISHED. 375 * 376 * The ISNs are from after the exchange of SYNs. i.e., the true ISN + 1. 377 */ 378 void 379 make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn, 380 uint16_t opt) 381 { 382 struct inpcb *inp = toep->inp; 383 struct socket *so = inp->inp_socket; 384 struct tcpcb *tp = intotcpcb(inp); 385 long bufsize; 386 uint32_t iss = be32toh(snd_isn) - 1; /* true ISS */ 387 uint32_t irs = be32toh(rcv_isn) - 1; /* true IRS */ 388 uint16_t tcpopt = be16toh(opt); 389 struct flowc_tx_params ftxp; 390 391 INP_WLOCK_ASSERT(inp); 392 KASSERT(tp->t_state == TCPS_SYN_SENT || 393 tp->t_state == TCPS_SYN_RECEIVED, 394 ("%s: TCP state %s", __func__, tcpstates[tp->t_state])); 395 396 CTR6(KTR_CXGBE, "%s: tid %d, so %p, inp %p, tp %p, toep %p", 397 __func__, toep->tid, so, inp, tp, toep); 398 399 tp->t_state = TCPS_ESTABLISHED; 400 tp->t_starttime = ticks; 401 TCPSTAT_INC(tcps_connects); 402 403 tp->irs = irs; 404 tcp_rcvseqinit(tp); 405 tp->rcv_wnd = toep->rx_credits << 10; 406 tp->rcv_adv += tp->rcv_wnd; 407 tp->last_ack_sent = tp->rcv_nxt; 408 409 /* 410 * If we were unable to send all rx credits via opt0, save the remainder 411 * in rx_credits so that they can be handed over with the next credit 412 * update. 
413 */ 414 SOCKBUF_LOCK(&so->so_rcv); 415 bufsize = select_rcv_wnd(so); 416 SOCKBUF_UNLOCK(&so->so_rcv); 417 toep->rx_credits = bufsize - tp->rcv_wnd; 418 419 tp->iss = iss; 420 tcp_sendseqinit(tp); 421 tp->snd_una = iss + 1; 422 tp->snd_nxt = iss + 1; 423 tp->snd_max = iss + 1; 424 425 assign_rxopt(tp, tcpopt); 426 427 SOCKBUF_LOCK(&so->so_snd); 428 if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf) 429 bufsize = V_tcp_autosndbuf_max; 430 else 431 bufsize = sbspace(&so->so_snd); 432 SOCKBUF_UNLOCK(&so->so_snd); 433 434 ftxp.snd_nxt = tp->snd_nxt; 435 ftxp.rcv_nxt = tp->rcv_nxt; 436 ftxp.snd_space = bufsize; 437 ftxp.mss = tp->t_maxseg; 438 send_flowc_wr(toep, &ftxp); 439 440 soisconnected(so); 441 } 442 443 int 444 send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits) 445 { 446 struct wrqe *wr; 447 struct cpl_rx_data_ack *req; 448 uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1); 449 450 KASSERT(credits >= 0, ("%s: %d credits", __func__, credits)); 451 452 wr = alloc_wrqe(sizeof(*req), toep->ctrlq); 453 if (wr == NULL) 454 return (0); 455 req = wrtod(wr); 456 457 INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid); 458 req->credit_dack = htobe32(dack | V_RX_CREDITS(credits)); 459 460 t4_wrq_tx(sc, wr); 461 return (credits); 462 } 463 464 void 465 send_rx_modulate(struct adapter *sc, struct toepcb *toep) 466 { 467 struct wrqe *wr; 468 struct cpl_rx_data_ack *req; 469 470 wr = alloc_wrqe(sizeof(*req), toep->ctrlq); 471 if (wr == NULL) 472 return; 473 req = wrtod(wr); 474 475 INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid); 476 req->credit_dack = htobe32(F_RX_MODULATE_RX); 477 478 t4_wrq_tx(sc, wr); 479 } 480 481 void 482 t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp) 483 { 484 struct adapter *sc = tod->tod_softc; 485 struct inpcb *inp = tp->t_inpcb; 486 struct socket *so = inp->inp_socket; 487 struct sockbuf *sb = &so->so_rcv; 488 struct toepcb *toep = tp->t_toe; 489 int credits; 490 491 INP_WLOCK_ASSERT(inp); 492 493 SOCKBUF_LOCK_ASSERT(sb); 494 KASSERT(toep->sb_cc >= sbused(sb), 495 ("%s: sb %p has more data (%d) than last time (%d).", 496 __func__, sb, sbused(sb), toep->sb_cc)); 497 498 credits = toep->sb_cc - sbused(sb); 499 toep->sb_cc = sbused(sb); 500 if (toep->ulp_mode == ULP_MODE_TLS) { 501 if (toep->tls.rcv_over >= credits) { 502 toep->tls.rcv_over -= credits; 503 credits = 0; 504 } else { 505 credits -= toep->tls.rcv_over; 506 toep->tls.rcv_over = 0; 507 } 508 } 509 toep->rx_credits += credits; 510 511 if (toep->rx_credits > 0 && 512 (tp->rcv_wnd <= 32 * 1024 || toep->rx_credits >= 64 * 1024 || 513 (toep->rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) || 514 toep->sb_cc + tp->rcv_wnd < sb->sb_lowat)) { 515 516 credits = send_rx_credits(sc, toep, toep->rx_credits); 517 toep->rx_credits -= credits; 518 tp->rcv_wnd += credits; 519 tp->rcv_adv += credits; 520 } else if (toep->flags & TPF_FORCE_CREDITS) 521 send_rx_modulate(sc, toep); 522 } 523 524 void 525 t4_rcvd(struct toedev *tod, struct tcpcb *tp) 526 { 527 struct inpcb *inp = tp->t_inpcb; 528 struct socket *so = inp->inp_socket; 529 struct sockbuf *sb = &so->so_rcv; 530 531 SOCKBUF_LOCK(sb); 532 t4_rcvd_locked(tod, tp); 533 SOCKBUF_UNLOCK(sb); 534 } 535 536 /* 537 * Close a connection by sending a CPL_CLOSE_CON_REQ message. 
538 */ 539 int 540 t4_close_conn(struct adapter *sc, struct toepcb *toep) 541 { 542 struct wrqe *wr; 543 struct cpl_close_con_req *req; 544 unsigned int tid = toep->tid; 545 546 CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid, 547 toep->flags & TPF_FIN_SENT ? ", IGNORED" : ""); 548 549 if (toep->flags & TPF_FIN_SENT) 550 return (0); 551 552 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 553 ("%s: flowc_wr not sent for tid %u.", __func__, tid)); 554 555 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 556 if (wr == NULL) { 557 /* XXX */ 558 panic("%s: allocation failure.", __func__); 559 } 560 req = wrtod(wr); 561 562 req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) | 563 V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr))); 564 req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) | 565 V_FW_WR_FLOWID(tid)); 566 req->wr.wr_lo = cpu_to_be64(0); 567 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); 568 req->rsvd = 0; 569 570 toep->flags |= TPF_FIN_SENT; 571 toep->flags &= ~TPF_SEND_FIN; 572 t4_l2t_send(sc, wr, toep->l2te); 573 574 return (0); 575 } 576 577 #define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16) 578 #define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16)) 579 580 /* Maximum amount of immediate data we could stuff in a WR */ 581 static inline int 582 max_imm_payload(int tx_credits) 583 { 584 const int n = 2; /* Use only up to 2 desc for imm. data WR */ 585 586 KASSERT(tx_credits >= 0 && 587 tx_credits <= MAX_OFLD_TX_CREDITS, 588 ("%s: %d credits", __func__, tx_credits)); 589 590 if (tx_credits < MIN_OFLD_TX_CREDITS) 591 return (0); 592 593 if (tx_credits >= (n * EQ_ESIZE) / 16) 594 return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr)); 595 else 596 return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr)); 597 } 598 599 /* Maximum number of SGL entries we could stuff in a WR */ 600 static inline int 601 max_dsgl_nsegs(int tx_credits) 602 { 603 int nseg = 1; /* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */ 604 int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS; 605 606 KASSERT(tx_credits >= 0 && 607 tx_credits <= MAX_OFLD_TX_CREDITS, 608 ("%s: %d credits", __func__, tx_credits)); 609 610 if (tx_credits < MIN_OFLD_TX_CREDITS) 611 return (0); 612 613 nseg += 2 * (sge_pair_credits * 16 / 24); 614 if ((sge_pair_credits * 16) % 24 == 16) 615 nseg++; 616 617 return (nseg); 618 } 619 620 static inline void 621 write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen, 622 unsigned int plen, uint8_t credits, int shove, int ulp_submode, int txalign) 623 { 624 struct fw_ofld_tx_data_wr *txwr = dst; 625 626 txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) | 627 V_FW_WR_IMMDLEN(immdlen)); 628 txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) | 629 V_FW_WR_LEN16(credits)); 630 txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(toep->ulp_mode) | 631 V_TX_ULP_SUBMODE(ulp_submode) | V_TX_URG(0) | V_TX_SHOVE(shove)); 632 txwr->plen = htobe32(plen); 633 634 if (txalign > 0) { 635 struct tcpcb *tp = intotcpcb(toep->inp); 636 637 if (plen < 2 * tp->t_maxseg || is_10G_port(toep->vi->pi)) 638 txwr->lsodisable_to_flags |= 639 htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE); 640 else 641 txwr->lsodisable_to_flags |= 642 htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD | 643 (tp->t_flags & TF_NODELAY ? 0 : 644 F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE)); 645 } 646 } 647 648 /* 649 * Generate a DSGL from a starting mbuf. The total number of segments and the 650 * maximum segments in any one mbuf are provided. 
651 */ 652 static void 653 write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n) 654 { 655 struct mbuf *m; 656 struct ulptx_sgl *usgl = dst; 657 int i, j, rc; 658 struct sglist sg; 659 struct sglist_seg segs[n]; 660 661 KASSERT(nsegs > 0, ("%s: nsegs 0", __func__)); 662 663 sglist_init(&sg, n, segs); 664 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 665 V_ULPTX_NSGE(nsegs)); 666 667 i = -1; 668 for (m = start; m != stop; m = m->m_next) { 669 if (IS_AIOTX_MBUF(m)) 670 rc = sglist_append_vmpages(&sg, aiotx_mbuf_pages(m), 671 aiotx_mbuf_pgoff(m), m->m_len); 672 else 673 rc = sglist_append(&sg, mtod(m, void *), m->m_len); 674 if (__predict_false(rc != 0)) 675 panic("%s: sglist_append %d", __func__, rc); 676 677 for (j = 0; j < sg.sg_nseg; i++, j++) { 678 if (i < 0) { 679 usgl->len0 = htobe32(segs[j].ss_len); 680 usgl->addr0 = htobe64(segs[j].ss_paddr); 681 } else { 682 usgl->sge[i / 2].len[i & 1] = 683 htobe32(segs[j].ss_len); 684 usgl->sge[i / 2].addr[i & 1] = 685 htobe64(segs[j].ss_paddr); 686 } 687 #ifdef INVARIANTS 688 nsegs--; 689 #endif 690 } 691 sglist_reset(&sg); 692 } 693 if (i & 1) 694 usgl->sge[i / 2].len[1] = htobe32(0); 695 KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p", 696 __func__, nsegs, start, stop)); 697 } 698 699 /* 700 * Max number of SGL entries an offload tx work request can have. This is 41 701 * (1 + 40) for a full 512B work request. 702 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40) 703 */ 704 #define OFLD_SGL_LEN (41) 705 706 /* 707 * Send data and/or a FIN to the peer. 708 * 709 * The socket's so_snd buffer consists of a stream of data starting with sb_mb 710 * and linked together with m_next. sb_sndptr, if set, is the last mbuf that 711 * was transmitted. 712 * 713 * drop indicates the number of bytes that should be dropped from the head of 714 * the send buffer. It is an optimization that lets do_fw4_ack avoid creating 715 * contention on the send buffer lock (before this change it used to do 716 * sowwakeup and then t4_push_frames right after that when recovering from tx 717 * stalls). When drop is set this function MUST drop the bytes and wake up any 718 * writers. 
719 */ 720 void 721 t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop) 722 { 723 struct mbuf *sndptr, *m, *sb_sndptr; 724 struct fw_ofld_tx_data_wr *txwr; 725 struct wrqe *wr; 726 u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf; 727 struct inpcb *inp = toep->inp; 728 struct tcpcb *tp = intotcpcb(inp); 729 struct socket *so = inp->inp_socket; 730 struct sockbuf *sb = &so->so_snd; 731 int tx_credits, shove, compl, sowwakeup; 732 struct ofld_tx_sdesc *txsd; 733 bool aiotx_mbuf_seen; 734 735 INP_WLOCK_ASSERT(inp); 736 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 737 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); 738 739 KASSERT(toep->ulp_mode == ULP_MODE_NONE || 740 toep->ulp_mode == ULP_MODE_TCPDDP || 741 toep->ulp_mode == ULP_MODE_TLS || 742 toep->ulp_mode == ULP_MODE_RDMA, 743 ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep)); 744 745 #ifdef VERBOSE_TRACES 746 CTR4(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d", 747 __func__, toep->tid, toep->flags, tp->t_flags); 748 #endif 749 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) 750 return; 751 752 #ifdef RATELIMIT 753 if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) && 754 (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) { 755 inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED; 756 } 757 #endif 758 759 /* 760 * This function doesn't resume by itself. Someone else must clear the 761 * flag and call this function. 762 */ 763 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { 764 KASSERT(drop == 0, 765 ("%s: drop (%d) != 0 but tx is suspended", __func__, drop)); 766 return; 767 } 768 769 txsd = &toep->txsd[toep->txsd_pidx]; 770 do { 771 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); 772 max_imm = max_imm_payload(tx_credits); 773 max_nsegs = max_dsgl_nsegs(tx_credits); 774 775 SOCKBUF_LOCK(sb); 776 sowwakeup = drop; 777 if (drop) { 778 sbdrop_locked(sb, drop); 779 drop = 0; 780 } 781 sb_sndptr = sb->sb_sndptr; 782 sndptr = sb_sndptr ? 
sb_sndptr->m_next : sb->sb_mb; 783 plen = 0; 784 nsegs = 0; 785 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ 786 aiotx_mbuf_seen = false; 787 for (m = sndptr; m != NULL; m = m->m_next) { 788 int n; 789 790 if (IS_AIOTX_MBUF(m)) 791 n = sglist_count_vmpages(aiotx_mbuf_pages(m), 792 aiotx_mbuf_pgoff(m), m->m_len); 793 else 794 n = sglist_count(mtod(m, void *), m->m_len); 795 796 nsegs += n; 797 plen += m->m_len; 798 799 /* This mbuf sent us _over_ the nsegs limit, back out */ 800 if (plen > max_imm && nsegs > max_nsegs) { 801 nsegs -= n; 802 plen -= m->m_len; 803 if (plen == 0) { 804 /* Too few credits */ 805 toep->flags |= TPF_TX_SUSPENDED; 806 if (sowwakeup) { 807 if (!TAILQ_EMPTY( 808 &toep->aiotx_jobq)) 809 t4_aiotx_queue_toep( 810 toep); 811 sowwakeup_locked(so); 812 } else 813 SOCKBUF_UNLOCK(sb); 814 SOCKBUF_UNLOCK_ASSERT(sb); 815 return; 816 } 817 break; 818 } 819 820 if (IS_AIOTX_MBUF(m)) 821 aiotx_mbuf_seen = true; 822 if (max_nsegs_1mbuf < n) 823 max_nsegs_1mbuf = n; 824 sb_sndptr = m; /* new sb->sb_sndptr if all goes well */ 825 826 /* This mbuf put us right at the max_nsegs limit */ 827 if (plen > max_imm && nsegs == max_nsegs) { 828 m = m->m_next; 829 break; 830 } 831 } 832 833 if (sbused(sb) > sb->sb_hiwat * 5 / 8 && 834 toep->plen_nocompl + plen >= sb->sb_hiwat / 4) 835 compl = 1; 836 else 837 compl = 0; 838 839 if (sb->sb_flags & SB_AUTOSIZE && 840 V_tcp_do_autosndbuf && 841 sb->sb_hiwat < V_tcp_autosndbuf_max && 842 sbused(sb) >= sb->sb_hiwat * 7 / 8) { 843 int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc, 844 V_tcp_autosndbuf_max); 845 846 if (!sbreserve_locked(sb, newsize, so, NULL)) 847 sb->sb_flags &= ~SB_AUTOSIZE; 848 else 849 sowwakeup = 1; /* room available */ 850 } 851 if (sowwakeup) { 852 if (!TAILQ_EMPTY(&toep->aiotx_jobq)) 853 t4_aiotx_queue_toep(toep); 854 sowwakeup_locked(so); 855 } else 856 SOCKBUF_UNLOCK(sb); 857 SOCKBUF_UNLOCK_ASSERT(sb); 858 859 /* nothing to send */ 860 if (plen == 0) { 861 KASSERT(m == NULL, 862 ("%s: nothing to send, but m != NULL", __func__)); 863 break; 864 } 865 866 if (__predict_false(toep->flags & TPF_FIN_SENT)) 867 panic("%s: excess tx.", __func__); 868 869 shove = m == NULL && !(tp->t_flags & TF_MORETOCOME); 870 if (plen <= max_imm && !aiotx_mbuf_seen) { 871 872 /* Immediate data tx */ 873 874 wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), 875 toep->ofld_txq); 876 if (wr == NULL) { 877 /* XXX: how will we recover from this? */ 878 toep->flags |= TPF_TX_SUSPENDED; 879 return; 880 } 881 txwr = wrtod(wr); 882 credits = howmany(wr->wr_len, 16); 883 write_tx_wr(txwr, toep, plen, plen, credits, shove, 0, 884 sc->tt.tx_align); 885 m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); 886 nsegs = 0; 887 } else { 888 int wr_len; 889 890 /* DSGL tx */ 891 892 wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + 893 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 894 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq); 895 if (wr == NULL) { 896 /* XXX: how will we recover from this? 
*/ 897 toep->flags |= TPF_TX_SUSPENDED; 898 return; 899 } 900 txwr = wrtod(wr); 901 credits = howmany(wr_len, 16); 902 write_tx_wr(txwr, toep, 0, plen, credits, shove, 0, 903 sc->tt.tx_align); 904 write_tx_sgl(txwr + 1, sndptr, m, nsegs, 905 max_nsegs_1mbuf); 906 if (wr_len & 0xf) { 907 uint64_t *pad = (uint64_t *) 908 ((uintptr_t)txwr + wr_len); 909 *pad = 0; 910 } 911 } 912 913 KASSERT(toep->tx_credits >= credits, 914 ("%s: not enough credits", __func__)); 915 916 toep->tx_credits -= credits; 917 toep->tx_nocompl += credits; 918 toep->plen_nocompl += plen; 919 if (toep->tx_credits <= toep->tx_total * 3 / 8 && 920 toep->tx_nocompl >= toep->tx_total / 4) 921 compl = 1; 922 923 if (compl || toep->ulp_mode == ULP_MODE_RDMA) { 924 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); 925 toep->tx_nocompl = 0; 926 toep->plen_nocompl = 0; 927 } 928 929 tp->snd_nxt += plen; 930 tp->snd_max += plen; 931 932 SOCKBUF_LOCK(sb); 933 KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__)); 934 sb->sb_sndptr = sb_sndptr; 935 SOCKBUF_UNLOCK(sb); 936 937 toep->flags |= TPF_TX_DATA_SENT; 938 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) 939 toep->flags |= TPF_TX_SUSPENDED; 940 941 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 942 txsd->plen = plen; 943 txsd->tx_credits = credits; 944 txsd++; 945 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 946 toep->txsd_pidx = 0; 947 txsd = &toep->txsd[0]; 948 } 949 toep->txsd_avail--; 950 951 t4_l2t_send(sc, wr, toep->l2te); 952 } while (m != NULL); 953 954 /* Send a FIN if requested, but only if there's no more data to send */ 955 if (m == NULL && toep->flags & TPF_SEND_FIN) 956 t4_close_conn(sc, toep); 957 } 958 959 static inline void 960 rqdrop_locked(struct mbufq *q, int plen) 961 { 962 struct mbuf *m; 963 964 while (plen > 0) { 965 m = mbufq_dequeue(q); 966 967 /* Too many credits. */ 968 MPASS(m != NULL); 969 M_ASSERTPKTHDR(m); 970 971 /* Partial credits. */ 972 MPASS(plen >= m->m_pkthdr.len); 973 974 plen -= m->m_pkthdr.len; 975 m_freem(m); 976 } 977 } 978 979 void 980 t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop) 981 { 982 struct mbuf *sndptr, *m; 983 struct fw_ofld_tx_data_wr *txwr; 984 struct wrqe *wr; 985 u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf; 986 u_int adjusted_plen, ulp_submode; 987 struct inpcb *inp = toep->inp; 988 struct tcpcb *tp = intotcpcb(inp); 989 int tx_credits, shove; 990 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 991 struct mbufq *pduq = &toep->ulp_pduq; 992 static const u_int ulp_extra_len[] = {0, 4, 4, 8}; 993 994 INP_WLOCK_ASSERT(inp); 995 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 996 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); 997 KASSERT(toep->ulp_mode == ULP_MODE_ISCSI, 998 ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep)); 999 1000 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) 1001 return; 1002 1003 /* 1004 * This function doesn't resume by itself. Someone else must clear the 1005 * flag and call this function. 
1006 */ 1007 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { 1008 KASSERT(drop == 0, 1009 ("%s: drop (%d) != 0 but tx is suspended", __func__, drop)); 1010 return; 1011 } 1012 1013 if (drop) 1014 rqdrop_locked(&toep->ulp_pdu_reclaimq, drop); 1015 1016 while ((sndptr = mbufq_first(pduq)) != NULL) { 1017 M_ASSERTPKTHDR(sndptr); 1018 1019 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); 1020 max_imm = max_imm_payload(tx_credits); 1021 max_nsegs = max_dsgl_nsegs(tx_credits); 1022 1023 plen = 0; 1024 nsegs = 0; 1025 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ 1026 for (m = sndptr; m != NULL; m = m->m_next) { 1027 int n = sglist_count(mtod(m, void *), m->m_len); 1028 1029 nsegs += n; 1030 plen += m->m_len; 1031 1032 /* 1033 * This mbuf would send us _over_ the nsegs limit. 1034 * Suspend tx because the PDU can't be sent out. 1035 */ 1036 if (plen > max_imm && nsegs > max_nsegs) { 1037 toep->flags |= TPF_TX_SUSPENDED; 1038 return; 1039 } 1040 1041 if (max_nsegs_1mbuf < n) 1042 max_nsegs_1mbuf = n; 1043 } 1044 1045 if (__predict_false(toep->flags & TPF_FIN_SENT)) 1046 panic("%s: excess tx.", __func__); 1047 1048 /* 1049 * We have a PDU to send. All of it goes out in one WR so 'm' 1050 * is NULL. A PDU's length is always a multiple of 4. 1051 */ 1052 MPASS(m == NULL); 1053 MPASS((plen & 3) == 0); 1054 MPASS(sndptr->m_pkthdr.len == plen); 1055 1056 shove = !(tp->t_flags & TF_MORETOCOME); 1057 ulp_submode = mbuf_ulp_submode(sndptr); 1058 MPASS(ulp_submode < nitems(ulp_extra_len)); 1059 1060 /* 1061 * plen doesn't include header and data digests, which are 1062 * generated and inserted in the right places by the TOE, but 1063 * they do occupy TCP sequence space and need to be accounted 1064 * for. 1065 */ 1066 adjusted_plen = plen + ulp_extra_len[ulp_submode]; 1067 if (plen <= max_imm) { 1068 1069 /* Immediate data tx */ 1070 1071 wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), 1072 toep->ofld_txq); 1073 if (wr == NULL) { 1074 /* XXX: how will we recover from this? */ 1075 toep->flags |= TPF_TX_SUSPENDED; 1076 return; 1077 } 1078 txwr = wrtod(wr); 1079 credits = howmany(wr->wr_len, 16); 1080 write_tx_wr(txwr, toep, plen, adjusted_plen, credits, 1081 shove, ulp_submode, sc->tt.tx_align); 1082 m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); 1083 nsegs = 0; 1084 } else { 1085 int wr_len; 1086 1087 /* DSGL tx */ 1088 wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + 1089 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 1090 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq); 1091 if (wr == NULL) { 1092 /* XXX: how will we recover from this? 
*/ 1093 toep->flags |= TPF_TX_SUSPENDED; 1094 return; 1095 } 1096 txwr = wrtod(wr); 1097 credits = howmany(wr_len, 16); 1098 write_tx_wr(txwr, toep, 0, adjusted_plen, credits, 1099 shove, ulp_submode, sc->tt.tx_align); 1100 write_tx_sgl(txwr + 1, sndptr, m, nsegs, 1101 max_nsegs_1mbuf); 1102 if (wr_len & 0xf) { 1103 uint64_t *pad = (uint64_t *) 1104 ((uintptr_t)txwr + wr_len); 1105 *pad = 0; 1106 } 1107 } 1108 1109 KASSERT(toep->tx_credits >= credits, 1110 ("%s: not enough credits", __func__)); 1111 1112 m = mbufq_dequeue(pduq); 1113 MPASS(m == sndptr); 1114 mbufq_enqueue(&toep->ulp_pdu_reclaimq, m); 1115 1116 toep->tx_credits -= credits; 1117 toep->tx_nocompl += credits; 1118 toep->plen_nocompl += plen; 1119 if (toep->tx_credits <= toep->tx_total * 3 / 8 && 1120 toep->tx_nocompl >= toep->tx_total / 4) { 1121 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); 1122 toep->tx_nocompl = 0; 1123 toep->plen_nocompl = 0; 1124 } 1125 1126 tp->snd_nxt += adjusted_plen; 1127 tp->snd_max += adjusted_plen; 1128 1129 toep->flags |= TPF_TX_DATA_SENT; 1130 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) 1131 toep->flags |= TPF_TX_SUSPENDED; 1132 1133 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 1134 txsd->plen = plen; 1135 txsd->tx_credits = credits; 1136 txsd++; 1137 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 1138 toep->txsd_pidx = 0; 1139 txsd = &toep->txsd[0]; 1140 } 1141 toep->txsd_avail--; 1142 1143 t4_l2t_send(sc, wr, toep->l2te); 1144 } 1145 1146 /* Send a FIN if requested, but only if there are no more PDUs to send */ 1147 if (mbufq_first(pduq) == NULL && toep->flags & TPF_SEND_FIN) 1148 t4_close_conn(sc, toep); 1149 } 1150 1151 int 1152 t4_tod_output(struct toedev *tod, struct tcpcb *tp) 1153 { 1154 struct adapter *sc = tod->tod_softc; 1155 #ifdef INVARIANTS 1156 struct inpcb *inp = tp->t_inpcb; 1157 #endif 1158 struct toepcb *toep = tp->t_toe; 1159 1160 INP_WLOCK_ASSERT(inp); 1161 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1162 ("%s: inp %p dropped.", __func__, inp)); 1163 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1164 1165 if (toep->ulp_mode == ULP_MODE_ISCSI) 1166 t4_push_pdus(sc, toep, 0); 1167 else if (tls_tx_key(toep)) 1168 t4_push_tls_records(sc, toep, 0); 1169 else 1170 t4_push_frames(sc, toep, 0); 1171 1172 return (0); 1173 } 1174 1175 int 1176 t4_send_fin(struct toedev *tod, struct tcpcb *tp) 1177 { 1178 struct adapter *sc = tod->tod_softc; 1179 #ifdef INVARIANTS 1180 struct inpcb *inp = tp->t_inpcb; 1181 #endif 1182 struct toepcb *toep = tp->t_toe; 1183 1184 INP_WLOCK_ASSERT(inp); 1185 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1186 ("%s: inp %p dropped.", __func__, inp)); 1187 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1188 1189 toep->flags |= TPF_SEND_FIN; 1190 if (tp->t_state >= TCPS_ESTABLISHED) { 1191 if (toep->ulp_mode == ULP_MODE_ISCSI) 1192 t4_push_pdus(sc, toep, 0); 1193 else if (tls_tx_key(toep)) 1194 t4_push_tls_records(sc, toep, 0); 1195 else 1196 t4_push_frames(sc, toep, 0); 1197 } 1198 1199 return (0); 1200 } 1201 1202 int 1203 t4_send_rst(struct toedev *tod, struct tcpcb *tp) 1204 { 1205 struct adapter *sc = tod->tod_softc; 1206 #if defined(INVARIANTS) 1207 struct inpcb *inp = tp->t_inpcb; 1208 #endif 1209 struct toepcb *toep = tp->t_toe; 1210 1211 INP_WLOCK_ASSERT(inp); 1212 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1213 ("%s: inp %p dropped.", __func__, inp)); 1214 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1215 1216 /* hmmmm */ 1217 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 1218 ("%s: flowc for tid %u 
[%s] not sent already", 1219 __func__, toep->tid, tcpstates[tp->t_state])); 1220 1221 send_reset(sc, toep, 0); 1222 return (0); 1223 } 1224 1225 /* 1226 * Peer has sent us a FIN. 1227 */ 1228 static int 1229 do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1230 { 1231 struct adapter *sc = iq->adapter; 1232 const struct cpl_peer_close *cpl = (const void *)(rss + 1); 1233 unsigned int tid = GET_TID(cpl); 1234 struct toepcb *toep = lookup_tid(sc, tid); 1235 struct inpcb *inp = toep->inp; 1236 struct tcpcb *tp = NULL; 1237 struct socket *so; 1238 #ifdef INVARIANTS 1239 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1240 #endif 1241 1242 KASSERT(opcode == CPL_PEER_CLOSE, 1243 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1244 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1245 1246 if (__predict_false(toep->flags & TPF_SYNQE)) { 1247 #ifdef INVARIANTS 1248 struct synq_entry *synqe = (void *)toep; 1249 1250 INP_WLOCK(synqe->lctx->inp); 1251 if (synqe->flags & TPF_SYNQE_HAS_L2TE) { 1252 KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, 1253 ("%s: listen socket closed but tid %u not aborted.", 1254 __func__, tid)); 1255 } else { 1256 /* 1257 * do_pass_accept_req is still running and will 1258 * eventually take care of this tid. 1259 */ 1260 } 1261 INP_WUNLOCK(synqe->lctx->inp); 1262 #endif 1263 CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, 1264 toep, toep->flags); 1265 return (0); 1266 } 1267 1268 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1269 1270 CURVNET_SET(toep->vnet); 1271 INP_INFO_RLOCK(&V_tcbinfo); 1272 INP_WLOCK(inp); 1273 tp = intotcpcb(inp); 1274 1275 CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__, 1276 tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp); 1277 1278 if (toep->flags & TPF_ABORT_SHUTDOWN) 1279 goto done; 1280 1281 tp->rcv_nxt++; /* FIN */ 1282 1283 so = inp->inp_socket; 1284 if (toep->ulp_mode == ULP_MODE_TCPDDP) { 1285 DDP_LOCK(toep); 1286 if (__predict_false(toep->ddp.flags & 1287 (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) 1288 handle_ddp_close(toep, tp, cpl->rcv_nxt); 1289 DDP_UNLOCK(toep); 1290 } 1291 socantrcvmore(so); 1292 1293 if (toep->ulp_mode != ULP_MODE_RDMA) { 1294 KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt), 1295 ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt, 1296 be32toh(cpl->rcv_nxt))); 1297 } 1298 1299 switch (tp->t_state) { 1300 case TCPS_SYN_RECEIVED: 1301 tp->t_starttime = ticks; 1302 /* FALLTHROUGH */ 1303 1304 case TCPS_ESTABLISHED: 1305 tp->t_state = TCPS_CLOSE_WAIT; 1306 break; 1307 1308 case TCPS_FIN_WAIT_1: 1309 tp->t_state = TCPS_CLOSING; 1310 break; 1311 1312 case TCPS_FIN_WAIT_2: 1313 tcp_twstart(tp); 1314 INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ 1315 INP_INFO_RUNLOCK(&V_tcbinfo); 1316 CURVNET_RESTORE(); 1317 1318 INP_WLOCK(inp); 1319 final_cpl_received(toep); 1320 return (0); 1321 1322 default: 1323 log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n", 1324 __func__, tid, tp->t_state); 1325 } 1326 done: 1327 INP_WUNLOCK(inp); 1328 INP_INFO_RUNLOCK(&V_tcbinfo); 1329 CURVNET_RESTORE(); 1330 return (0); 1331 } 1332 1333 /* 1334 * Peer has ACK'd our FIN. 
1335 */ 1336 static int 1337 do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss, 1338 struct mbuf *m) 1339 { 1340 struct adapter *sc = iq->adapter; 1341 const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1); 1342 unsigned int tid = GET_TID(cpl); 1343 struct toepcb *toep = lookup_tid(sc, tid); 1344 struct inpcb *inp = toep->inp; 1345 struct tcpcb *tp = NULL; 1346 struct socket *so = NULL; 1347 #ifdef INVARIANTS 1348 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1349 #endif 1350 1351 KASSERT(opcode == CPL_CLOSE_CON_RPL, 1352 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1353 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1354 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1355 1356 CURVNET_SET(toep->vnet); 1357 INP_INFO_RLOCK(&V_tcbinfo); 1358 INP_WLOCK(inp); 1359 tp = intotcpcb(inp); 1360 1361 CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x", 1362 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags); 1363 1364 if (toep->flags & TPF_ABORT_SHUTDOWN) 1365 goto done; 1366 1367 so = inp->inp_socket; 1368 tp->snd_una = be32toh(cpl->snd_nxt) - 1; /* exclude FIN */ 1369 1370 switch (tp->t_state) { 1371 case TCPS_CLOSING: /* see TCPS_FIN_WAIT_2 in do_peer_close too */ 1372 tcp_twstart(tp); 1373 release: 1374 INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ 1375 INP_INFO_RUNLOCK(&V_tcbinfo); 1376 CURVNET_RESTORE(); 1377 1378 INP_WLOCK(inp); 1379 final_cpl_received(toep); /* no more CPLs expected */ 1380 1381 return (0); 1382 case TCPS_LAST_ACK: 1383 if (tcp_close(tp)) 1384 INP_WUNLOCK(inp); 1385 goto release; 1386 1387 case TCPS_FIN_WAIT_1: 1388 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 1389 soisdisconnected(so); 1390 tp->t_state = TCPS_FIN_WAIT_2; 1391 break; 1392 1393 default: 1394 log(LOG_ERR, 1395 "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n", 1396 __func__, tid, tcpstates[tp->t_state]); 1397 } 1398 done: 1399 INP_WUNLOCK(inp); 1400 INP_INFO_RUNLOCK(&V_tcbinfo); 1401 CURVNET_RESTORE(); 1402 return (0); 1403 } 1404 1405 void 1406 send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid, 1407 int rst_status) 1408 { 1409 struct wrqe *wr; 1410 struct cpl_abort_rpl *cpl; 1411 1412 wr = alloc_wrqe(sizeof(*cpl), ofld_txq); 1413 if (wr == NULL) { 1414 /* XXX */ 1415 panic("%s: allocation failure.", __func__); 1416 } 1417 cpl = wrtod(wr); 1418 1419 INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid); 1420 cpl->cmd = rst_status; 1421 1422 t4_wrq_tx(sc, wr); 1423 } 1424 1425 static int 1426 abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason) 1427 { 1428 switch (abort_reason) { 1429 case CPL_ERR_BAD_SYN: 1430 case CPL_ERR_CONN_RESET: 1431 return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET); 1432 case CPL_ERR_XMIT_TIMEDOUT: 1433 case CPL_ERR_PERSIST_TIMEDOUT: 1434 case CPL_ERR_FINWAIT2_TIMEDOUT: 1435 case CPL_ERR_KEEPALIVE_TIMEDOUT: 1436 return (ETIMEDOUT); 1437 default: 1438 return (EIO); 1439 } 1440 } 1441 1442 /* 1443 * TCP RST from the peer, timeout, or some other such critical error. 
1444 */ 1445 static int 1446 do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1447 { 1448 struct adapter *sc = iq->adapter; 1449 const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1); 1450 unsigned int tid = GET_TID(cpl); 1451 struct toepcb *toep = lookup_tid(sc, tid); 1452 struct sge_wrq *ofld_txq = toep->ofld_txq; 1453 struct inpcb *inp; 1454 struct tcpcb *tp; 1455 #ifdef INVARIANTS 1456 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1457 #endif 1458 1459 KASSERT(opcode == CPL_ABORT_REQ_RSS, 1460 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1461 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1462 1463 if (toep->flags & TPF_SYNQE) 1464 return (do_abort_req_synqe(iq, rss, m)); 1465 1466 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1467 1468 if (negative_advice(cpl->status)) { 1469 CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)", 1470 __func__, cpl->status, tid, toep->flags); 1471 return (0); /* Ignore negative advice */ 1472 } 1473 1474 inp = toep->inp; 1475 CURVNET_SET(toep->vnet); 1476 INP_INFO_RLOCK(&V_tcbinfo); /* for tcp_close */ 1477 INP_WLOCK(inp); 1478 1479 tp = intotcpcb(inp); 1480 1481 CTR6(KTR_CXGBE, 1482 "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d", 1483 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, 1484 inp->inp_flags, cpl->status); 1485 1486 /* 1487 * If we'd initiated an abort earlier the reply to it is responsible for 1488 * cleaning up resources. Otherwise we tear everything down right here 1489 * right now. We owe the T4 a CPL_ABORT_RPL no matter what. 1490 */ 1491 if (toep->flags & TPF_ABORT_SHUTDOWN) { 1492 INP_WUNLOCK(inp); 1493 goto done; 1494 } 1495 toep->flags |= TPF_ABORT_SHUTDOWN; 1496 1497 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 1498 struct socket *so = inp->inp_socket; 1499 1500 if (so != NULL) 1501 so_error_set(so, abort_status_to_errno(tp, 1502 cpl->status)); 1503 tp = tcp_close(tp); 1504 if (tp == NULL) 1505 INP_WLOCK(inp); /* re-acquire */ 1506 } 1507 1508 final_cpl_received(toep); 1509 done: 1510 INP_INFO_RUNLOCK(&V_tcbinfo); 1511 CURVNET_RESTORE(); 1512 send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST); 1513 return (0); 1514 } 1515 1516 /* 1517 * Reply to the CPL_ABORT_REQ (send_reset) 1518 */ 1519 static int 1520 do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1521 { 1522 struct adapter *sc = iq->adapter; 1523 const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1); 1524 unsigned int tid = GET_TID(cpl); 1525 struct toepcb *toep = lookup_tid(sc, tid); 1526 struct inpcb *inp = toep->inp; 1527 #ifdef INVARIANTS 1528 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1529 #endif 1530 1531 KASSERT(opcode == CPL_ABORT_RPL_RSS, 1532 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1533 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1534 1535 if (toep->flags & TPF_SYNQE) 1536 return (do_abort_rpl_synqe(iq, rss, m)); 1537 1538 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1539 1540 CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d", 1541 __func__, tid, toep, inp, cpl->status); 1542 1543 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, 1544 ("%s: wasn't expecting abort reply", __func__)); 1545 1546 INP_WLOCK(inp); 1547 final_cpl_received(toep); 1548 1549 return (0); 1550 } 1551 1552 static int 1553 do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1554 { 1555 struct adapter *sc = iq->adapter; 
1556 const struct cpl_rx_data *cpl = mtod(m, const void *); 1557 unsigned int tid = GET_TID(cpl); 1558 struct toepcb *toep = lookup_tid(sc, tid); 1559 struct inpcb *inp = toep->inp; 1560 struct tcpcb *tp; 1561 struct socket *so; 1562 struct sockbuf *sb; 1563 int len; 1564 uint32_t ddp_placed = 0; 1565 1566 if (__predict_false(toep->flags & TPF_SYNQE)) { 1567 #ifdef INVARIANTS 1568 struct synq_entry *synqe = (void *)toep; 1569 1570 INP_WLOCK(synqe->lctx->inp); 1571 if (synqe->flags & TPF_SYNQE_HAS_L2TE) { 1572 KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, 1573 ("%s: listen socket closed but tid %u not aborted.", 1574 __func__, tid)); 1575 } else { 1576 /* 1577 * do_pass_accept_req is still running and will 1578 * eventually take care of this tid. 1579 */ 1580 } 1581 INP_WUNLOCK(synqe->lctx->inp); 1582 #endif 1583 CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, 1584 toep, toep->flags); 1585 m_freem(m); 1586 return (0); 1587 } 1588 1589 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1590 1591 /* strip off CPL header */ 1592 m_adj(m, sizeof(*cpl)); 1593 len = m->m_pkthdr.len; 1594 1595 INP_WLOCK(inp); 1596 if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) { 1597 CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x", 1598 __func__, tid, len, inp->inp_flags); 1599 INP_WUNLOCK(inp); 1600 m_freem(m); 1601 return (0); 1602 } 1603 1604 tp = intotcpcb(inp); 1605 1606 if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq))) 1607 ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt; 1608 1609 tp->rcv_nxt += len; 1610 if (tp->rcv_wnd < len) { 1611 KASSERT(toep->ulp_mode == ULP_MODE_RDMA, 1612 ("%s: negative window size", __func__)); 1613 } 1614 1615 tp->rcv_wnd -= len; 1616 tp->t_rcvtime = ticks; 1617 1618 if (toep->ulp_mode == ULP_MODE_TCPDDP) 1619 DDP_LOCK(toep); 1620 so = inp_inpcbtosocket(inp); 1621 sb = &so->so_rcv; 1622 SOCKBUF_LOCK(sb); 1623 1624 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) { 1625 CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)", 1626 __func__, tid, len); 1627 m_freem(m); 1628 SOCKBUF_UNLOCK(sb); 1629 if (toep->ulp_mode == ULP_MODE_TCPDDP) 1630 DDP_UNLOCK(toep); 1631 INP_WUNLOCK(inp); 1632 1633 CURVNET_SET(toep->vnet); 1634 INP_INFO_RLOCK(&V_tcbinfo); 1635 INP_WLOCK(inp); 1636 tp = tcp_drop(tp, ECONNRESET); 1637 if (tp) 1638 INP_WUNLOCK(inp); 1639 INP_INFO_RUNLOCK(&V_tcbinfo); 1640 CURVNET_RESTORE(); 1641 1642 return (0); 1643 } 1644 1645 /* receive buffer autosize */ 1646 MPASS(toep->vnet == so->so_vnet); 1647 CURVNET_SET(toep->vnet); 1648 if (sb->sb_flags & SB_AUTOSIZE && 1649 V_tcp_do_autorcvbuf && 1650 sb->sb_hiwat < V_tcp_autorcvbuf_max && 1651 len > (sbspace(sb) / 8 * 7)) { 1652 unsigned int hiwat = sb->sb_hiwat; 1653 unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc, 1654 V_tcp_autorcvbuf_max); 1655 1656 if (!sbreserve_locked(sb, newsize, so, NULL)) 1657 sb->sb_flags &= ~SB_AUTOSIZE; 1658 else 1659 toep->rx_credits += newsize - hiwat; 1660 } 1661 1662 if (toep->ulp_mode == ULP_MODE_TCPDDP) { 1663 int changed = !(toep->ddp.flags & DDP_ON) ^ cpl->ddp_off; 1664 1665 if (toep->ddp.waiting_count != 0 || toep->ddp.active_count != 0) 1666 CTR3(KTR_CXGBE, "%s: tid %u, non-ddp rx (%d bytes)", 1667 __func__, tid, len); 1668 1669 if (changed) { 1670 if (toep->ddp.flags & DDP_SC_REQ) 1671 toep->ddp.flags ^= DDP_ON | DDP_SC_REQ; 1672 else { 1673 KASSERT(cpl->ddp_off == 1, 1674 ("%s: DDP switched on by itself.", 1675 __func__)); 1676 1677 /* Fell out of DDP mode */ 1678 toep->ddp.flags &= ~DDP_ON; 1679 CTR1(KTR_CXGBE, "%s: fell out of DDP mode", 
1680 __func__); 1681 1682 insert_ddp_data(toep, ddp_placed); 1683 } 1684 } 1685 1686 if (toep->ddp.flags & DDP_ON) { 1687 /* 1688 * CPL_RX_DATA with DDP on can only be an indicate. 1689 * Start posting queued AIO requests via DDP. The 1690 * payload that arrived in this indicate is appended 1691 * to the socket buffer as usual. 1692 */ 1693 handle_ddp_indicate(toep); 1694 } 1695 } 1696 1697 KASSERT(toep->sb_cc >= sbused(sb), 1698 ("%s: sb %p has more data (%d) than last time (%d).", 1699 __func__, sb, sbused(sb), toep->sb_cc)); 1700 toep->rx_credits += toep->sb_cc - sbused(sb); 1701 sbappendstream_locked(sb, m, 0); 1702 toep->sb_cc = sbused(sb); 1703 if (toep->rx_credits > 0 && toep->sb_cc + tp->rcv_wnd < sb->sb_lowat) { 1704 int credits; 1705 1706 credits = send_rx_credits(sc, toep, toep->rx_credits); 1707 toep->rx_credits -= credits; 1708 tp->rcv_wnd += credits; 1709 tp->rcv_adv += credits; 1710 } 1711 1712 if (toep->ulp_mode == ULP_MODE_TCPDDP && toep->ddp.waiting_count > 0 && 1713 sbavail(sb) != 0) { 1714 CTR2(KTR_CXGBE, "%s: tid %u queueing AIO task", __func__, 1715 tid); 1716 ddp_queue_toep(toep); 1717 } 1718 sorwakeup_locked(so); 1719 SOCKBUF_UNLOCK_ASSERT(sb); 1720 if (toep->ulp_mode == ULP_MODE_TCPDDP) 1721 DDP_UNLOCK(toep); 1722 1723 INP_WUNLOCK(inp); 1724 CURVNET_RESTORE(); 1725 return (0); 1726 } 1727 1728 #define S_CPL_FW4_ACK_OPCODE 24 1729 #define M_CPL_FW4_ACK_OPCODE 0xff 1730 #define V_CPL_FW4_ACK_OPCODE(x) ((x) << S_CPL_FW4_ACK_OPCODE) 1731 #define G_CPL_FW4_ACK_OPCODE(x) \ 1732 (((x) >> S_CPL_FW4_ACK_OPCODE) & M_CPL_FW4_ACK_OPCODE) 1733 1734 #define S_CPL_FW4_ACK_FLOWID 0 1735 #define M_CPL_FW4_ACK_FLOWID 0xffffff 1736 #define V_CPL_FW4_ACK_FLOWID(x) ((x) << S_CPL_FW4_ACK_FLOWID) 1737 #define G_CPL_FW4_ACK_FLOWID(x) \ 1738 (((x) >> S_CPL_FW4_ACK_FLOWID) & M_CPL_FW4_ACK_FLOWID) 1739 1740 #define S_CPL_FW4_ACK_CR 24 1741 #define M_CPL_FW4_ACK_CR 0xff 1742 #define V_CPL_FW4_ACK_CR(x) ((x) << S_CPL_FW4_ACK_CR) 1743 #define G_CPL_FW4_ACK_CR(x) (((x) >> S_CPL_FW4_ACK_CR) & M_CPL_FW4_ACK_CR) 1744 1745 #define S_CPL_FW4_ACK_SEQVAL 0 1746 #define M_CPL_FW4_ACK_SEQVAL 0x1 1747 #define V_CPL_FW4_ACK_SEQVAL(x) ((x) << S_CPL_FW4_ACK_SEQVAL) 1748 #define G_CPL_FW4_ACK_SEQVAL(x) \ 1749 (((x) >> S_CPL_FW4_ACK_SEQVAL) & M_CPL_FW4_ACK_SEQVAL) 1750 #define F_CPL_FW4_ACK_SEQVAL V_CPL_FW4_ACK_SEQVAL(1U) 1751 1752 static int 1753 do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1754 { 1755 struct adapter *sc = iq->adapter; 1756 const struct cpl_fw4_ack *cpl = (const void *)(rss + 1); 1757 unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl))); 1758 struct toepcb *toep = lookup_tid(sc, tid); 1759 struct inpcb *inp; 1760 struct tcpcb *tp; 1761 struct socket *so; 1762 uint8_t credits = cpl->credits; 1763 struct ofld_tx_sdesc *txsd; 1764 int plen; 1765 #ifdef INVARIANTS 1766 unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl))); 1767 #endif 1768 1769 /* 1770 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and 1771 * now this comes back carrying the credits for the flowc. 
1772 */ 1773 if (__predict_false(toep->flags & TPF_SYNQE)) { 1774 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, 1775 ("%s: credits for a synq entry %p", __func__, toep)); 1776 return (0); 1777 } 1778 1779 inp = toep->inp; 1780 1781 KASSERT(opcode == CPL_FW4_ACK, 1782 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1783 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1784 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1785 1786 INP_WLOCK(inp); 1787 1788 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) { 1789 INP_WUNLOCK(inp); 1790 return (0); 1791 } 1792 1793 KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0, 1794 ("%s: inp_flags 0x%x", __func__, inp->inp_flags)); 1795 1796 tp = intotcpcb(inp); 1797 1798 if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) { 1799 tcp_seq snd_una = be32toh(cpl->snd_una); 1800 1801 #ifdef INVARIANTS 1802 if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) { 1803 log(LOG_ERR, 1804 "%s: unexpected seq# %x for TID %u, snd_una %x\n", 1805 __func__, snd_una, toep->tid, tp->snd_una); 1806 } 1807 #endif 1808 1809 if (tp->snd_una != snd_una) { 1810 tp->snd_una = snd_una; 1811 tp->ts_recent_age = tcp_ts_getticks(); 1812 } 1813 } 1814 1815 #ifdef VERBOSE_TRACES 1816 CTR3(KTR_CXGBE, "%s: tid %d credits %u", __func__, tid, credits); 1817 #endif 1818 so = inp->inp_socket; 1819 txsd = &toep->txsd[toep->txsd_cidx]; 1820 plen = 0; 1821 while (credits) { 1822 KASSERT(credits >= txsd->tx_credits, 1823 ("%s: too many (or partial) credits", __func__)); 1824 credits -= txsd->tx_credits; 1825 toep->tx_credits += txsd->tx_credits; 1826 plen += txsd->plen; 1827 if (txsd->iv_buffer) { 1828 free(txsd->iv_buffer, M_CXGBE); 1829 txsd->iv_buffer = NULL; 1830 } 1831 txsd++; 1832 toep->txsd_avail++; 1833 KASSERT(toep->txsd_avail <= toep->txsd_total, 1834 ("%s: txsd avail > total", __func__)); 1835 if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) { 1836 txsd = &toep->txsd[0]; 1837 toep->txsd_cidx = 0; 1838 } 1839 } 1840 1841 if (toep->tx_credits == toep->tx_total) { 1842 toep->tx_nocompl = 0; 1843 toep->plen_nocompl = 0; 1844 } 1845 1846 if (toep->flags & TPF_TX_SUSPENDED && 1847 toep->tx_credits >= toep->tx_total / 4) { 1848 #ifdef VERBOSE_TRACES 1849 CTR2(KTR_CXGBE, "%s: tid %d calling t4_push_frames", __func__, 1850 tid); 1851 #endif 1852 toep->flags &= ~TPF_TX_SUSPENDED; 1853 CURVNET_SET(toep->vnet); 1854 if (toep->ulp_mode == ULP_MODE_ISCSI) 1855 t4_push_pdus(sc, toep, plen); 1856 else if (tls_tx_key(toep)) 1857 t4_push_tls_records(sc, toep, plen); 1858 else 1859 t4_push_frames(sc, toep, plen); 1860 CURVNET_RESTORE(); 1861 } else if (plen > 0) { 1862 struct sockbuf *sb = &so->so_snd; 1863 int sbu; 1864 1865 SOCKBUF_LOCK(sb); 1866 sbu = sbused(sb); 1867 if (toep->ulp_mode == ULP_MODE_ISCSI) { 1868 1869 if (__predict_false(sbu > 0)) { 1870 /* 1871 * The data trasmitted before the tid's ULP mode 1872 * changed to ISCSI is still in so_snd. 1873 * Incoming credits should account for so_snd 1874 * first. 
1875 */ 1876 sbdrop_locked(sb, min(sbu, plen)); 1877 plen -= min(sbu, plen); 1878 } 1879 sowwakeup_locked(so); /* unlocks so_snd */ 1880 rqdrop_locked(&toep->ulp_pdu_reclaimq, plen); 1881 } else { 1882 #ifdef VERBOSE_TRACES 1883 CTR3(KTR_CXGBE, "%s: tid %d dropped %d bytes", __func__, 1884 tid, plen); 1885 #endif 1886 sbdrop_locked(sb, plen); 1887 if (tls_tx_key(toep)) { 1888 struct tls_ofld_info *tls_ofld = &toep->tls; 1889 1890 MPASS(tls_ofld->sb_off >= plen); 1891 tls_ofld->sb_off -= plen; 1892 } 1893 if (!TAILQ_EMPTY(&toep->aiotx_jobq)) 1894 t4_aiotx_queue_toep(toep); 1895 sowwakeup_locked(so); /* unlocks so_snd */ 1896 } 1897 SOCKBUF_UNLOCK_ASSERT(sb); 1898 } 1899 1900 INP_WUNLOCK(inp); 1901 1902 return (0); 1903 } 1904 1905 void 1906 t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, struct toepcb *toep, 1907 uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie) 1908 { 1909 struct wrqe *wr; 1910 struct cpl_set_tcb_field *req; 1911 struct ofld_tx_sdesc *txsd; 1912 1913 MPASS((cookie & ~M_COOKIE) == 0); 1914 if (reply) { 1915 MPASS(cookie != CPL_COOKIE_RESERVED); 1916 } 1917 1918 wr = alloc_wrqe(sizeof(*req), wrq); 1919 if (wr == NULL) { 1920 /* XXX */ 1921 panic("%s: allocation failure.", __func__); 1922 } 1923 req = wrtod(wr); 1924 1925 INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid); 1926 req->reply_ctrl = htobe16(V_QUEUENO(toep->ofld_rxq->iq.abs_id)); 1927 if (reply == 0) 1928 req->reply_ctrl |= htobe16(F_NO_REPLY); 1929 req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie)); 1930 req->mask = htobe64(mask); 1931 req->val = htobe64(val); 1932 if ((wrq->eq.flags & EQ_TYPEMASK) == EQ_OFLD) { 1933 txsd = &toep->txsd[toep->txsd_pidx]; 1934 txsd->tx_credits = howmany(sizeof(*req), 16); 1935 txsd->plen = 0; 1936 KASSERT(toep->tx_credits >= txsd->tx_credits && 1937 toep->txsd_avail > 0, 1938 ("%s: not enough credits (%d)", __func__, 1939 toep->tx_credits)); 1940 toep->tx_credits -= txsd->tx_credits; 1941 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) 1942 toep->txsd_pidx = 0; 1943 toep->txsd_avail--; 1944 } 1945 1946 t4_wrq_tx(sc, wr); 1947 } 1948 1949 void 1950 t4_init_cpl_io_handlers(void) 1951 { 1952 1953 t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close); 1954 t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl); 1955 t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req); 1956 t4_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl); 1957 t4_register_cpl_handler(CPL_RX_DATA, do_rx_data); 1958 t4_register_cpl_handler(CPL_FW4_ACK, do_fw4_ack); 1959 } 1960 1961 void 1962 t4_uninit_cpl_io_handlers(void) 1963 { 1964 1965 t4_register_cpl_handler(CPL_PEER_CLOSE, NULL); 1966 t4_register_cpl_handler(CPL_CLOSE_CON_RPL, NULL); 1967 t4_register_cpl_handler(CPL_ABORT_REQ_RSS, NULL); 1968 t4_register_cpl_handler(CPL_ABORT_RPL_RSS, NULL); 1969 t4_register_cpl_handler(CPL_RX_DATA, NULL); 1970 t4_register_cpl_handler(CPL_FW4_ACK, NULL); 1971 } 1972 1973 /* 1974 * Use the 'backend3' field in AIO jobs to store the amount of data 1975 * sent by the AIO job so far and the 'backend4' field to hold an 1976 * error that should be reported when the job is completed. 
void
t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, struct toepcb *toep,
    uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie)
{
	struct wrqe *wr;
	struct cpl_set_tcb_field *req;
	struct ofld_tx_sdesc *txsd;

	MPASS((cookie & ~M_COOKIE) == 0);
	if (reply) {
		MPASS(cookie != CPL_COOKIE_RESERVED);
	}

	wr = alloc_wrqe(sizeof(*req), wrq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
	req->reply_ctrl = htobe16(V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	if (reply == 0)
		req->reply_ctrl |= htobe16(F_NO_REPLY);
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie));
	req->mask = htobe64(mask);
	req->val = htobe64(val);
	if ((wrq->eq.flags & EQ_TYPEMASK) == EQ_OFLD) {
		txsd = &toep->txsd[toep->txsd_pidx];
		txsd->tx_credits = howmany(sizeof(*req), 16);
		txsd->plen = 0;
		KASSERT(toep->tx_credits >= txsd->tx_credits &&
		    toep->txsd_avail > 0,
		    ("%s: not enough credits (%d)", __func__,
		    toep->tx_credits));
		toep->tx_credits -= txsd->tx_credits;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
			toep->txsd_pidx = 0;
		toep->txsd_avail--;
	}

	t4_wrq_tx(sc, wr);
}

void
t4_init_cpl_io_handlers(void)
{

	t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close);
	t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl);
	t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req);
	t4_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl);
	t4_register_cpl_handler(CPL_RX_DATA, do_rx_data);
	t4_register_cpl_handler(CPL_FW4_ACK, do_fw4_ack);
}

void
t4_uninit_cpl_io_handlers(void)
{

	t4_register_cpl_handler(CPL_PEER_CLOSE, NULL);
	t4_register_cpl_handler(CPL_CLOSE_CON_RPL, NULL);
	t4_register_cpl_handler(CPL_ABORT_REQ_RSS, NULL);
	t4_register_cpl_handler(CPL_ABORT_RPL_RSS, NULL);
	t4_register_cpl_handler(CPL_RX_DATA, NULL);
	t4_register_cpl_handler(CPL_FW4_ACK, NULL);
}

/*
 * Use the 'backend3' field in AIO jobs to store the amount of data
 * sent by the AIO job so far and the 'backend4' field to hold an
 * error that should be reported when the job is completed.
 */
#define	aio_sent	backend3
#define	aio_error	backend4

#define	jobtotid(job)							\
	(((struct toepcb *)(so_sototcpcb((job)->fd_file->f_data)->t_toe))->tid)

static void
free_aiotx_buffer(struct aiotx_buffer *ab)
{
	struct kaiocb *job;
	long status;
	int error;

	if (refcount_release(&ab->refcount) == 0)
		return;

	job = ab->job;
	error = job->aio_error;
	status = job->aio_sent;
	vm_page_unhold_pages(ab->ps.pages, ab->ps.npages);
	free(ab, M_CXGBE);
#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d completed %p len %ld, error %d", __func__,
	    jobtotid(job), job, status, error);
#endif
	if (error == ECANCELED && status != 0)
		error = 0;
	if (error == ECANCELED)
		aio_cancel(job);
	else if (error)
		aio_complete(job, -1, error);
	else
		aio_complete(job, status, 0);
}

static void
t4_aiotx_mbuf_free(struct mbuf *m)
{
	struct aiotx_buffer *ab = m->m_ext.ext_arg1;

#ifdef VERBOSE_TRACES
	CTR3(KTR_CXGBE, "%s: completed %d bytes for tid %d", __func__,
	    m->m_len, jobtotid(ab->job));
#endif
	free_aiotx_buffer(ab);
}

/*
 * Hold the buffer backing an AIO request and return an AIO transmit
 * buffer.
 */
static int
hold_aio(struct kaiocb *job)
{
	struct aiotx_buffer *ab;
	struct vmspace *vm;
	vm_map_t map;
	vm_offset_t start, end, pgoff;
	int n;

	MPASS(job->backend1 == NULL);

	/*
	 * The AIO subsystem will cancel and drain all requests before
	 * permitting a process to exit or exec, so p_vmspace should
	 * be stable here.
	 */
	vm = job->userproc->p_vmspace;
	map = &vm->vm_map;
	start = (uintptr_t)job->uaiocb.aio_buf;
	pgoff = start & PAGE_MASK;
	end = round_page(start + job->uaiocb.aio_nbytes);
	start = trunc_page(start);
	n = atop(end - start);

	ab = malloc(sizeof(*ab) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK |
	    M_ZERO);
	refcount_init(&ab->refcount, 1);
	ab->ps.pages = (vm_page_t *)(ab + 1);
	ab->ps.npages = vm_fault_quick_hold_pages(map, start, end - start,
	    VM_PROT_WRITE, ab->ps.pages, n);
	if (ab->ps.npages < 0) {
		free(ab, M_CXGBE);
		return (EFAULT);
	}

	KASSERT(ab->ps.npages == n,
	    ("hold_aio: page count mismatch: %d vs %d", ab->ps.npages, n));

	ab->ps.offset = pgoff;
	ab->ps.len = job->uaiocb.aio_nbytes;
	ab->job = job;
	job->backend1 = ab;
#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d",
	    __func__, jobtotid(job), &ab->ps, job, ab->ps.npages);
#endif
	return (0);
}
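
/*
 * Send as much of an AIO write's buffer as the socket currently permits,
 * wrapping the data in zero-copy external mbufs that reference the pages
 * held by hold_aio().  The code below inlines simplified versions of
 * sosend_generic() and tcp_usr_send(); instead of sleeping when the socket
 * buffer is full, the job is requeued and retried once space is available.
 */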
static void
t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job)
{
	struct adapter *sc;
	struct sockbuf *sb;
	struct file *fp;
	struct aiotx_buffer *ab;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct mbuf *m;
	int error;
	bool moretocome, sendmore;

	sc = td_adapter(toep->td);
	sb = &so->so_snd;
	SOCKBUF_UNLOCK(sb);
	fp = job->fd_file;
	ab = job->backend1;
	m = NULL;

#ifdef MAC
	error = mac_socket_check_send(fp->f_cred, so);
	if (error != 0)
		goto out;
#endif

	if (ab == NULL) {
		error = hold_aio(job);
		if (error != 0)
			goto out;
		ab = job->backend1;
	}

	/* Inline sosend_generic(). */

	job->msgsnd = 1;

	error = sblock(sb, SBL_WAIT);
	MPASS(error == 0);

sendanother:
	m = m_get(M_WAITOK, MT_DATA);

	SOCKBUF_LOCK(sb);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		if ((so->so_options & SO_NOSIGPIPE) == 0) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		error = ENOTCONN;
		goto out;
	}
	if (sbspace(sb) < sb->sb_lowat) {
		MPASS(job->aio_sent == 0 || !(so->so_state & SS_NBIO));

		/*
		 * Don't block if there is too little room in the socket
		 * buffer.  Instead, requeue the request.
		 */
		if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
			SOCKBUF_UNLOCK(sb);
			sbunlock(sb);
			error = ECANCELED;
			goto out;
		}
		TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		goto out;
	}

	/*
	 * Write as much data as the socket permits, but no more than a
	 * single sndbuf at a time.
	 */
	m->m_len = sbspace(sb);
	if (m->m_len > ab->ps.len - job->aio_sent) {
		m->m_len = ab->ps.len - job->aio_sent;
		moretocome = false;
	} else
		moretocome = true;
	if (m->m_len > sc->tt.sndbuf) {
		m->m_len = sc->tt.sndbuf;
		sendmore = true;
	} else
		sendmore = false;

	if (!TAILQ_EMPTY(&toep->aiotx_jobq))
		moretocome = true;
	SOCKBUF_UNLOCK(sb);
	MPASS(m->m_len != 0);

	/* Inlined tcp_usr_send(). */

	inp = toep->inp;
	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		sbunlock(sb);
		error = ECONNRESET;
		goto out;
	}

	refcount_acquire(&ab->refcount);
	m_extadd(m, NULL, ab->ps.len, t4_aiotx_mbuf_free, ab,
	    (void *)(uintptr_t)job->aio_sent, 0, EXT_NET_DRV);
	m->m_ext.ext_flags |= EXT_FLAG_AIOTX;
	job->aio_sent += m->m_len;

	sbappendstream(sb, m, 0);
	m = NULL;

	if (!(inp->inp_flags & INP_DROPPED)) {
		tp = intotcpcb(inp);
		if (moretocome)
			tp->t_flags |= TF_MORETOCOME;
		error = tp->t_fb->tfb_tcp_output(tp);
		if (moretocome)
			tp->t_flags &= ~TF_MORETOCOME;
	}

	INP_WUNLOCK(inp);
	if (sendmore)
		goto sendanother;
	sbunlock(sb);

	if (error)
		goto out;

	/*
	 * If this is a blocking socket and the request has not been
	 * fully completed, requeue it until the socket is ready
	 * again.
	 */
	if (job->aio_sent < job->uaiocb.aio_nbytes &&
	    !(so->so_state & SS_NBIO)) {
		SOCKBUF_LOCK(sb);
		if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
			SOCKBUF_UNLOCK(sb);
			error = ECANCELED;
			goto out;
		}
		TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
		return;
	}

	/*
	 * If the request will not be requeued, drop a reference on
	 * the aiotx buffer.  Any mbufs in flight should still
	 * contain a reference, but this drops the reference that the
	 * job owns while it is waiting to queue mbufs to the socket.
	 */
	free_aiotx_buffer(ab);

out:
	if (error) {
		if (ab != NULL) {
			job->aio_error = error;
			free_aiotx_buffer(ab);
		} else {
			MPASS(job->aio_sent == 0);
			aio_complete(job, -1, error);
		}
	}
	if (m != NULL)
		m_free(m);
	SOCKBUF_LOCK(sb);
}
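
/*
 * Task handler that runs from the soaio taskqueue.  It drains the
 * per-connection queue of pending AIO tx jobs while the socket remains
 * writeable, then drops the toepcb reference taken when the task was
 * enqueued.
 */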
static void
t4_aiotx_task(void *context, int pending)
{
	struct toepcb *toep = context;
	struct inpcb *inp = toep->inp;
	struct socket *so = inp->inp_socket;
	struct kaiocb *job;

	CURVNET_SET(toep->vnet);
	SOCKBUF_LOCK(&so->so_snd);
	while (!TAILQ_EMPTY(&toep->aiotx_jobq) && sowriteable(so)) {
		job = TAILQ_FIRST(&toep->aiotx_jobq);
		TAILQ_REMOVE(&toep->aiotx_jobq, job, list);
		if (!aio_clear_cancel_function(job))
			continue;

		t4_aiotx_process_job(toep, so, job);
	}
	toep->aiotx_task_active = false;
	SOCKBUF_UNLOCK(&so->so_snd);
	CURVNET_RESTORE();

	free_toepcb(toep);
}

static void
t4_aiotx_queue_toep(struct toepcb *toep)
{

	SOCKBUF_LOCK_ASSERT(&toep->inp->inp_socket->so_snd);
#ifdef VERBOSE_TRACES
	CTR3(KTR_CXGBE, "%s: queueing aiotx task for tid %d, active = %s",
	    __func__, toep->tid, toep->aiotx_task_active ? "true" : "false");
#endif
	if (toep->aiotx_task_active)
		return;
	toep->aiotx_task_active = true;
	hold_toepcb(toep);
	soaio_enqueue(&toep->aiotx_task);
}

static void
t4_aiotx_cancel(struct kaiocb *job)
{
	struct aiotx_buffer *ab;
	struct socket *so;
	struct sockbuf *sb;
	struct tcpcb *tp;
	struct toepcb *toep;

	so = job->fd_file->f_data;
	tp = so_sototcpcb(so);
	toep = tp->t_toe;
	MPASS(job->uaiocb.aio_lio_opcode == LIO_WRITE);
	sb = &so->so_snd;

	SOCKBUF_LOCK(sb);
	if (!aio_cancel_cleared(job))
		TAILQ_REMOVE(&toep->aiotx_jobq, job, list);
	SOCKBUF_UNLOCK(sb);

	ab = job->backend1;
	if (ab != NULL)
		free_aiotx_buffer(ab);
	else
		aio_cancel(job);
}

int
t4_aio_queue_aiotx(struct socket *so, struct kaiocb *job)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	struct adapter *sc = td_adapter(toep->td);

	/* This only handles writes. */
	if (job->uaiocb.aio_lio_opcode != LIO_WRITE)
		return (EOPNOTSUPP);

	if (!sc->tt.tx_zcopy)
		return (EOPNOTSUPP);

	if (tls_tx_key(toep))
		return (EOPNOTSUPP);

	SOCKBUF_LOCK(&so->so_snd);
#ifdef VERBOSE_TRACES
	CTR2(KTR_CXGBE, "%s: queueing %p", __func__, job);
#endif
	if (!aio_set_cancel_function(job, t4_aiotx_cancel))
		panic("new job was cancelled");
	TAILQ_INSERT_TAIL(&toep->aiotx_jobq, job, list);
	if (sowriteable(so))
		t4_aiotx_queue_toep(toep);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (0);
}

void
aiotx_init_toep(struct toepcb *toep)
{

	TAILQ_INIT(&toep->aiotx_jobq);
	TASK_INIT(&toep->aiotx_task, 0, t4_aiotx_task, toep);
}
#endif