/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/refcount.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/scope6_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>
#include <netinet/cc/cc.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_clip.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"
#include "tom/t4_tls.h"

static struct protosw toe_protosw;
static struct protosw toe6_protosw;

/* Module ops */
static int t4_tom_mod_load(void);
static int t4_tom_mod_unload(void);
static int t4_tom_modevent(module_t, int, void *);

/* ULD ops and helpers */
static int t4_tom_activate(struct adapter *);
static int t4_tom_deactivate(struct adapter *);

static struct uld_info tom_uld_info = {
        .uld_id = ULD_TOM,
        .activate = t4_tom_activate,
        .deactivate = t4_tom_deactivate,
};

static void release_offload_resources(struct toepcb *);
static int alloc_tid_tabs(struct tid_info *);
static void free_tid_tabs(struct tid_info *);
static void free_tom_data(struct adapter *, struct tom_data *);
static void reclaim_wr_resources(void *, int);
struct toepcb *
alloc_toepcb(struct vi_info *vi, int flags)
{
        struct port_info *pi = vi->pi;
        struct adapter *sc = pi->adapter;
        struct toepcb *toep;
        int tx_credits, txsd_total, len;

        /*
         * The firmware counts tx work request credits in units of 16 bytes
         * each.  Reserve room for an ABORT_REQ so the driver never has to
         * worry about tx credits if it wants to abort a connection.
         */
        tx_credits = sc->params.ofldq_wr_cred;
        tx_credits -= howmany(sizeof(struct cpl_abort_req), 16);

        /*
         * Shortest possible tx work request is a fw_ofld_tx_data_wr + 1 byte
         * immediate payload, and firmware counts tx work request credits in
         * units of 16 bytes.  Calculate the maximum work requests possible.
         */
        txsd_total = tx_credits /
            howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16);

        len = offsetof(struct toepcb, txsd) +
            txsd_total * sizeof(struct ofld_tx_sdesc);

        toep = malloc(len, M_CXGBE, M_ZERO | flags);
        if (toep == NULL)
                return (NULL);

        refcount_init(&toep->refcount, 1);
        toep->td = sc->tom_softc;
        toep->vi = vi;
        toep->tid = -1;
        toep->tx_total = tx_credits;
        toep->tx_credits = tx_credits;
        mbufq_init(&toep->ulp_pduq, INT_MAX);
        mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX);
        toep->txsd_total = txsd_total;
        toep->txsd_avail = txsd_total;
        toep->txsd_pidx = 0;
        toep->txsd_cidx = 0;
        aiotx_init_toep(toep);

        return (toep);
}
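/*
 * Worked example of the credit math in alloc_toepcb() above; the numbers
 * are illustrative, the real values come from the firmware and structure
 * sizes.  With ofldq_wr_cred = 548 and a 16-byte cpl_abort_req,
 * tx_credits = 548 - howmany(16, 16) = 547.  If fw_ofld_tx_data_wr were
 * 24 bytes, the smallest WR (header + 1 immediate byte) would cost
 * howmany(25, 16) = 2 credits, giving txsd_total = 547 / 2 = 273 software
 * tx descriptors.
 */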
/*
 * Initialize a toepcb after its params have been filled out.
 */
int
init_toepcb(struct vi_info *vi, struct toepcb *toep)
{
        struct conn_params *cp = &toep->params;
        struct port_info *pi = vi->pi;
        struct adapter *sc = pi->adapter;
        struct tx_cl_rl_params *tc;

        if (cp->tc_idx >= 0 && cp->tc_idx < sc->params.nsched_cls) {
                tc = &pi->sched_params->cl_rl[cp->tc_idx];
                mtx_lock(&sc->tc_lock);
                if (tc->state != CS_HW_CONFIGURED) {
                        CH_ERR(vi, "tid %d cannot be bound to traffic class %d "
                            "because it is not configured (its state is %d)\n",
                            toep->tid, cp->tc_idx, tc->state);
                        cp->tc_idx = -1;
                } else {
                        tc->refcount++;
                }
                mtx_unlock(&sc->tc_lock);
        }
        toep->ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
        toep->ofld_rxq = &sc->sge.ofld_rxq[cp->rxq_idx];
        toep->ctrlq = &sc->sge.ctrlq[pi->port_id];

        tls_init_toep(toep);
        if (ulp_mode(toep) == ULP_MODE_TCPDDP)
                ddp_init_toep(toep);

        toep->flags |= TPF_INITIALIZED;

        return (0);
}

struct toepcb *
hold_toepcb(struct toepcb *toep)
{

        refcount_acquire(&toep->refcount);
        return (toep);
}

void
free_toepcb(struct toepcb *toep)
{

        if (refcount_release(&toep->refcount) == 0)
                return;

        KASSERT(!(toep->flags & TPF_ATTACHED),
            ("%s: attached to an inpcb", __func__));
        KASSERT(!(toep->flags & TPF_CPL_PENDING),
            ("%s: CPL pending", __func__));

        if (toep->flags & TPF_INITIALIZED) {
                if (ulp_mode(toep) == ULP_MODE_TCPDDP)
                        ddp_uninit_toep(toep);
                tls_uninit_toep(toep);
        }
        free(toep, M_CXGBE);
}

/*
 * Set up the socket for TCP offload.
 */
void
offload_socket(struct socket *so, struct toepcb *toep)
{
        struct tom_data *td = toep->td;
        struct inpcb *inp = sotoinpcb(so);
        struct tcpcb *tp = intotcpcb(inp);
        struct sockbuf *sb;

        INP_WLOCK_ASSERT(inp);

        /* Update socket */
        sb = &so->so_snd;
        SOCKBUF_LOCK(sb);
        sb->sb_flags |= SB_NOCOALESCE;
        SOCKBUF_UNLOCK(sb);
        sb = &so->so_rcv;
        SOCKBUF_LOCK(sb);
        sb->sb_flags |= SB_NOCOALESCE;
        if (inp->inp_vflag & INP_IPV6)
                so->so_proto = &toe6_protosw;
        else
                so->so_proto = &toe_protosw;
        SOCKBUF_UNLOCK(sb);

        /* Update TCP PCB */
        tp->tod = &td->tod;
        tp->t_toe = toep;
        tp->t_flags |= TF_TOE;

        /* Install an extra hold on inp */
        toep->inp = inp;
        toep->flags |= TPF_ATTACHED;
        in_pcbref(inp);

        /* Add the TOE PCB to the active list */
        mtx_lock(&td->toep_list_lock);
        TAILQ_INSERT_HEAD(&td->toep_list, toep, link);
        mtx_unlock(&td->toep_list_lock);
}

void
restore_so_proto(struct socket *so, bool v6)
{
        if (v6)
                so->so_proto = &tcp6_protosw;
        else
                so->so_proto = &tcp_protosw;
}

/* This is _not_ the normal way to "unoffload" a socket. */
void
undo_offload_socket(struct socket *so)
{
        struct inpcb *inp = sotoinpcb(so);
        struct tcpcb *tp = intotcpcb(inp);
        struct toepcb *toep = tp->t_toe;
        struct tom_data *td = toep->td;
        struct sockbuf *sb;

        INP_WLOCK_ASSERT(inp);

        sb = &so->so_snd;
        SOCKBUF_LOCK(sb);
        sb->sb_flags &= ~SB_NOCOALESCE;
        SOCKBUF_UNLOCK(sb);
        sb = &so->so_rcv;
        SOCKBUF_LOCK(sb);
        sb->sb_flags &= ~SB_NOCOALESCE;
        restore_so_proto(so, inp->inp_vflag & INP_IPV6);
        SOCKBUF_UNLOCK(sb);

        tp->tod = NULL;
        tp->t_toe = NULL;
        tp->t_flags &= ~TF_TOE;

        toep->inp = NULL;
        toep->flags &= ~TPF_ATTACHED;
        if (in_pcbrele_wlocked(inp))
                panic("%s: inp freed.", __func__);

        mtx_lock(&td->toep_list_lock);
        TAILQ_REMOVE(&td->toep_list, toep, link);
        mtx_unlock(&td->toep_list_lock);
}

static void
release_offload_resources(struct toepcb *toep)
{
        struct tom_data *td = toep->td;
        struct adapter *sc = td_adapter(td);
        int tid = toep->tid;

        KASSERT(!(toep->flags & TPF_CPL_PENDING),
            ("%s: %p has CPL pending.", __func__, toep));
        KASSERT(!(toep->flags & TPF_ATTACHED),
            ("%s: %p is still attached.", __func__, toep));

        CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)",
            __func__, toep, tid, toep->l2te, toep->ce);

        /*
         * These queues should have been emptied at approximately the same
         * time that a normal connection's socket's so_snd would have been
         * purged or drained.  Do _not_ clean up here.
         */
        MPASS(mbufq_len(&toep->ulp_pduq) == 0);
        MPASS(mbufq_len(&toep->ulp_pdu_reclaimq) == 0);
#ifdef INVARIANTS
        if (ulp_mode(toep) == ULP_MODE_TCPDDP)
                ddp_assert_empty(toep);
#endif
        MPASS(TAILQ_EMPTY(&toep->aiotx_jobq));

        if (toep->l2te)
                t4_l2t_release(toep->l2te);

        if (tid >= 0) {
                remove_tid(sc, tid, toep->ce ? 2 : 1);
                release_tid(sc, tid, toep->ctrlq);
        }

        if (toep->ce)
                t4_release_clip_entry(sc, toep->ce);

        if (toep->params.tc_idx != -1)
                t4_release_cl_rl(sc, toep->vi->pi->port_id,
                    toep->params.tc_idx);

        mtx_lock(&td->toep_list_lock);
        TAILQ_REMOVE(&td->toep_list, toep, link);
        mtx_unlock(&td->toep_list_lock);

        free_toepcb(toep);
}
/*
 * The kernel is done with the TCP PCB and this is our opportunity to unhook
 * the toepcb hanging off of it.  If the TOE driver is also done with the
 * toepcb (no pending CPL) then it is time to release all resources tied to
 * the toepcb.
 *
 * Also gets called when an offloaded active open fails and the TOM wants the
 * kernel to take the TCP PCB back.
 */
static void
t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
{
#if defined(KTR) || defined(INVARIANTS)
        struct inpcb *inp = tptoinpcb(tp);
#endif
        struct toepcb *toep = tp->t_toe;

        INP_WLOCK_ASSERT(inp);

        KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
        KASSERT(toep->flags & TPF_ATTACHED,
            ("%s: not attached", __func__));

#ifdef KTR
        if (tp->t_state == TCPS_SYN_SENT) {
                CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)",
                    __func__, toep->tid, toep, toep->flags, inp,
                    inp->inp_flags);
        } else {
                CTR6(KTR_CXGBE,
                    "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)",
                    toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp,
                    inp->inp_flags);
        }
#endif

        tp->tod = NULL;
        tp->t_toe = NULL;
        tp->t_flags &= ~TF_TOE;
        toep->flags &= ~TPF_ATTACHED;

        if (!(toep->flags & TPF_CPL_PENDING))
                release_offload_resources(toep);
}

/*
 * setsockopt handler.
 */
static void
t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name)
{
        struct adapter *sc = tod->tod_softc;
        struct toepcb *toep = tp->t_toe;

        if (dir == SOPT_GET)
                return;

        CTR4(KTR_CXGBE, "%s: tp %p, dir %u, name %u", __func__, tp, dir, name);

        switch (name) {
        case TCP_NODELAY:
                if (tp->t_state != TCPS_ESTABLISHED)
                        break;
                toep->params.nagle = tp->t_flags & TF_NODELAY ? 0 : 1;
                t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
                    V_TF_NAGLE(1), V_TF_NAGLE(toep->params.nagle), 0, 0);
                break;
        default:
                break;
        }
}

static inline uint64_t
get_tcb_tflags(const uint64_t *tcb)
{

        return ((be64toh(tcb[14]) << 32) | (be64toh(tcb[15]) >> 32));
}

static inline uint32_t
get_tcb_field(const uint64_t *tcb, u_int word, uint32_t mask, u_int shift)
{
#define LAST_WORD ((TCB_SIZE / 4) - 1)
        uint64_t t1, t2;
        int flit_idx;

        MPASS(mask != 0);
        MPASS(word <= LAST_WORD);
        MPASS(shift < 32);

        flit_idx = (LAST_WORD - word) / 2;
        if (word & 0x1)
                shift += 32;
        t1 = be64toh(tcb[flit_idx]) >> shift;
        t2 = 0;
        if (fls(mask) > 64 - shift) {
                /*
                 * Will spill over into the next logical flit, which is the
                 * flit before this one.  The flit_idx before this one must be
                 * valid.
                 */
                MPASS(flit_idx > 0);
                t2 = be64toh(tcb[flit_idx - 1]) << (64 - shift);
        }
        return ((t2 | t1) & mask);
#undef LAST_WORD
}
#define GET_TCB_FIELD(tcb, F) \
    get_tcb_field(tcb, W_TCB_##F, M_TCB_##F, S_TCB_##F)
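/*
 * Sketch of the word-to-flit mapping in get_tcb_field() above, assuming
 * TCB_SIZE = 128 so that LAST_WORD = 31.  For word 3 (an odd word),
 * flit_idx = (31 - 3) / 2 = 14 and the shift grows by 32 because odd
 * words occupy the upper half of the big-endian flit.  A field whose
 * mask reaches past bit (63 - shift) spills into tcb[13], the previous
 * flit, which is what the t2 term collects before it is OR'd with t1
 * and masked.
 */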
/*
 * Issues a CPL_GET_TCB to read the entire TCB for the tid.
 */
static int
send_get_tcb(struct adapter *sc, u_int tid)
{
        struct cpl_get_tcb *cpl;
        struct wrq_cookie cookie;

        MPASS(tid >= sc->tids.tid_base);
        MPASS(tid - sc->tids.tid_base < sc->tids.ntids);

        cpl = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*cpl), 16),
            &cookie);
        if (__predict_false(cpl == NULL))
                return (ENOMEM);
        bzero(cpl, sizeof(*cpl));
        INIT_TP_WR(cpl, tid);
        OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_GET_TCB, tid));
        cpl->reply_ctrl = htobe16(V_REPLY_CHAN(0) |
            V_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id));
        cpl->cookie = 0xff;
        commit_wrq_wr(&sc->sge.ctrlq[0], cpl, &cookie);

        return (0);
}

static struct tcb_histent *
alloc_tcb_histent(struct adapter *sc, u_int tid, int flags)
{
        struct tcb_histent *te;

        MPASS(flags == M_NOWAIT || flags == M_WAITOK);

        te = malloc(sizeof(*te), M_CXGBE, M_ZERO | flags);
        if (te == NULL)
                return (NULL);
        mtx_init(&te->te_lock, "TCB entry", NULL, MTX_DEF);
        callout_init_mtx(&te->te_callout, &te->te_lock, 0);
        te->te_adapter = sc;
        te->te_tid = tid;

        return (te);
}

static void
free_tcb_histent(struct tcb_histent *te)
{

        mtx_destroy(&te->te_lock);
        free(te, M_CXGBE);
}

/*
 * Start tracking the tid in the TCB history.
 */
int
add_tid_to_history(struct adapter *sc, u_int tid)
{
        struct tcb_histent *te = NULL;
        struct tom_data *td = sc->tom_softc;
        int rc;

        MPASS(tid >= sc->tids.tid_base);
        MPASS(tid - sc->tids.tid_base < sc->tids.ntids);

        if (td->tcb_history == NULL)
                return (ENXIO);

        rw_wlock(&td->tcb_history_lock);
        if (td->tcb_history[tid] != NULL) {
                rc = EEXIST;
                goto done;
        }
        te = alloc_tcb_histent(sc, tid, M_NOWAIT);
        if (te == NULL) {
                rc = ENOMEM;
                goto done;
        }
        mtx_lock(&te->te_lock);
        rc = send_get_tcb(sc, tid);
        if (rc == 0) {
                te->te_flags |= TE_RPL_PENDING;
                td->tcb_history[tid] = te;
        }
        mtx_unlock(&te->te_lock);
        if (rc != 0)
                free_tcb_histent(te);   /* unlock first: te_lock lives in te */
done:
        rw_wunlock(&td->tcb_history_lock);
        return (rc);
}

static void
remove_tcb_histent(struct tcb_histent *te)
{
        struct adapter *sc = te->te_adapter;
        struct tom_data *td = sc->tom_softc;

        rw_assert(&td->tcb_history_lock, RA_WLOCKED);
        mtx_assert(&te->te_lock, MA_OWNED);
        MPASS(td->tcb_history[te->te_tid] == te);

        td->tcb_history[te->te_tid] = NULL;
        free_tcb_histent(te);
        rw_wunlock(&td->tcb_history_lock);
}

static inline struct tcb_histent *
lookup_tcb_histent(struct adapter *sc, u_int tid, bool addrem)
{
        struct tcb_histent *te;
        struct tom_data *td = sc->tom_softc;

        MPASS(tid >= sc->tids.tid_base);
        MPASS(tid - sc->tids.tid_base < sc->tids.ntids);

        if (td->tcb_history == NULL)
                return (NULL);

        if (addrem)
                rw_wlock(&td->tcb_history_lock);
        else
                rw_rlock(&td->tcb_history_lock);
        te = td->tcb_history[tid];
        if (te != NULL) {
                mtx_lock(&te->te_lock);
                return (te);    /* with both locks held */
        }
        if (addrem)
                rw_wunlock(&td->tcb_history_lock);
        else
                rw_runlock(&td->tcb_history_lock);

        return (te);
}

static inline void
release_tcb_histent(struct tcb_histent *te)
{
        struct adapter *sc = te->te_adapter;
        struct tom_data *td = sc->tom_softc;

        mtx_assert(&te->te_lock, MA_OWNED);
        mtx_unlock(&te->te_lock);
        rw_assert(&td->tcb_history_lock, RA_RLOCKED);
        rw_runlock(&td->tcb_history_lock);
}
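/*
 * Locking contract for the histent helpers above: a successful
 * lookup_tcb_histent() returns with the history lock (read or write,
 * depending on addrem) and the entry mutex both held.  Read-side callers
 * finish with release_tcb_histent(); a caller that passed addrem = true
 * ends with remove_tcb_histent(), which frees the entry and drops the
 * wlock on its way out.
 */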
static void
request_tcb(void *arg)
{
        struct tcb_histent *te = arg;

        mtx_assert(&te->te_lock, MA_OWNED);

        /* No one else is supposed to update the histent. */
        MPASS(!(te->te_flags & TE_RPL_PENDING));
        if (send_get_tcb(te->te_adapter, te->te_tid) == 0)
                te->te_flags |= TE_RPL_PENDING;
        else
                callout_schedule(&te->te_callout, hz / 100);
}

static void
update_tcb_histent(struct tcb_histent *te, const uint64_t *tcb)
{
        struct tom_data *td = te->te_adapter->tom_softc;
        uint64_t tflags = get_tcb_tflags(tcb);
        uint8_t sample = 0;

        if (GET_TCB_FIELD(tcb, SND_MAX_RAW) !=
            GET_TCB_FIELD(tcb, SND_UNA_RAW)) {
                if (GET_TCB_FIELD(tcb, T_RXTSHIFT) != 0)
                        sample |= TS_RTO;
                if (GET_TCB_FIELD(tcb, T_DUPACKS) != 0)
                        sample |= TS_DUPACKS;
                if (GET_TCB_FIELD(tcb, T_DUPACKS) >= td->dupack_threshold)
                        sample |= TS_FASTREXMT;
        }

        if (GET_TCB_FIELD(tcb, SND_MAX_RAW) != 0) {
                uint32_t snd_wnd;

                sample |= TS_SND_BACKLOGGED;    /* for whatever reason. */

                snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV);
                if (tflags & V_TF_RECV_SCALE(1))
                        snd_wnd <<= GET_TCB_FIELD(tcb, RCV_SCALE);
                if (GET_TCB_FIELD(tcb, SND_CWND) < snd_wnd)
                        sample |= TS_CWND_LIMITED;      /* maybe due to CWND */
        }

        if (tflags & V_TF_CCTRL_ECN(1)) {

                /*
                 * CE marker on incoming IP hdr, echoing ECE back in the TCP
                 * hdr.  Indicates congestion somewhere on the way from the
                 * peer to this node.
                 */
                if (tflags & V_TF_CCTRL_ECE(1))
                        sample |= TS_ECN_ECE;

                /*
                 * ECE seen and CWR sent (or about to be sent).  Might
                 * indicate congestion on the way to the peer.  This node is
                 * reducing its congestion window in response.
                 */
                if (tflags & (V_TF_CCTRL_CWR(1) | V_TF_CCTRL_RFR(1)))
                        sample |= TS_ECN_CWR;
        }

        te->te_sample[te->te_pidx] = sample;
        if (++te->te_pidx == nitems(te->te_sample))
                te->te_pidx = 0;
        memcpy(te->te_tcb, tcb, TCB_SIZE);
        te->te_flags |= TE_ACTIVE;
}
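/*
 * Example reading of one sample byte produced above (illustrative): a
 * value of TS_RTO | TS_DUPACKS means that, at the instant of the
 * snapshot, the connection had unacknowledged data, had hit the
 * retransmit timer at least once (T_RXTSHIFT != 0), and had seen
 * duplicate ACKs.  The te_sample[] ring thus gives a coarse recent
 * history of why the connection was not making progress.
 */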
static int
do_get_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_get_tcb_rpl *cpl = mtod(m, const void *);
        const uint64_t *tcb = (const uint64_t *)(const void *)(cpl + 1);
        struct tcb_histent *te;
        const u_int tid = GET_TID(cpl);
        bool remove;

        remove = GET_TCB_FIELD(tcb, T_STATE) == TCPS_CLOSED;
        te = lookup_tcb_histent(sc, tid, remove);
        if (te == NULL) {
                /* Not in the history.  Who issued the GET_TCB for this? */
                device_printf(sc->dev, "tcb %u: flags 0x%016jx, state %u, "
                    "srtt %u, sscale %u, rscale %u, cookie 0x%x\n", tid,
                    (uintmax_t)get_tcb_tflags(tcb), GET_TCB_FIELD(tcb, T_STATE),
                    GET_TCB_FIELD(tcb, T_SRTT), GET_TCB_FIELD(tcb, SND_SCALE),
                    GET_TCB_FIELD(tcb, RCV_SCALE), cpl->cookie);
                goto done;
        }

        MPASS(te->te_flags & TE_RPL_PENDING);
        te->te_flags &= ~TE_RPL_PENDING;
        if (remove) {
                remove_tcb_histent(te);
        } else {
                update_tcb_histent(te, tcb);
                callout_reset(&te->te_callout, hz / 10, request_tcb, te);
                release_tcb_histent(te);
        }
done:
        m_freem(m);
        return (0);
}

static void
fill_tcp_info_from_tcb(struct adapter *sc, uint64_t *tcb, struct tcp_info *ti)
{
        uint32_t v;

        ti->tcpi_state = GET_TCB_FIELD(tcb, T_STATE);

        v = GET_TCB_FIELD(tcb, T_SRTT);
        ti->tcpi_rtt = tcp_ticks_to_us(sc, v);

        v = GET_TCB_FIELD(tcb, T_RTTVAR);
        ti->tcpi_rttvar = tcp_ticks_to_us(sc, v);

        ti->tcpi_snd_ssthresh = GET_TCB_FIELD(tcb, SND_SSTHRESH);
        ti->tcpi_snd_cwnd = GET_TCB_FIELD(tcb, SND_CWND);
        ti->tcpi_rcv_nxt = GET_TCB_FIELD(tcb, RCV_NXT);

        v = GET_TCB_FIELD(tcb, TX_MAX);
        ti->tcpi_snd_nxt = v - GET_TCB_FIELD(tcb, SND_NXT_RAW);

        /* Receive window being advertised by us. */
        ti->tcpi_rcv_wscale = GET_TCB_FIELD(tcb, SND_SCALE);    /* Yes, SND. */
        ti->tcpi_rcv_space = GET_TCB_FIELD(tcb, RCV_WND);

        /* Send window */
        ti->tcpi_snd_wscale = GET_TCB_FIELD(tcb, RCV_SCALE);    /* Yes, RCV. */
        ti->tcpi_snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV);
        if (get_tcb_tflags(tcb) & V_TF_RECV_SCALE(1))
                ti->tcpi_snd_wnd <<= ti->tcpi_snd_wscale;
        else
                ti->tcpi_snd_wscale = 0;
}

static void
fill_tcp_info_from_history(struct adapter *sc, struct tcb_histent *te,
    struct tcp_info *ti)
{

        fill_tcp_info_from_tcb(sc, te->te_tcb, ti);
}

/*
 * Reads the TCB for the given tid using a memory window and copies it to 'buf'
 * in the same format as CPL_GET_TCB_RPL.
 */
static void
read_tcb_using_memwin(struct adapter *sc, u_int tid, uint64_t *buf)
{
        int i, j, k, rc;
        uint32_t addr;
        u_char *tcb, tmp;

        MPASS(tid >= sc->tids.tid_base);
        MPASS(tid - sc->tids.tid_base < sc->tids.ntids);

        addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;
        rc = read_via_memwin(sc, 2, addr, (uint32_t *)buf, TCB_SIZE);
        if (rc != 0)
                return;

        tcb = (u_char *)buf;
        for (i = 0, j = TCB_SIZE - 16; i < j; i += 16, j -= 16) {
                for (k = 0; k < 16; k++) {
                        tmp = tcb[i + k];
                        tcb[i + k] = tcb[j + k];
                        tcb[j + k] = tmp;
                }
        }
}

static void
fill_tcp_info(struct adapter *sc, u_int tid, struct tcp_info *ti)
{
        uint64_t tcb[TCB_SIZE / sizeof(uint64_t)];
        struct tcb_histent *te;

        ti->tcpi_toe_tid = tid;
        te = lookup_tcb_histent(sc, tid, false);
        if (te != NULL) {
                fill_tcp_info_from_history(sc, te, ti);
                release_tcb_histent(te);
        } else {
                if (!(sc->debug_flags & DF_DISABLE_TCB_CACHE)) {
                        /* XXX: tell firmware to flush TCB cache. */
                }
                read_tcb_using_memwin(sc, tid, tcb);
                fill_tcp_info_from_tcb(sc, tcb, ti);
        }
}
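/*
 * Why read_tcb_using_memwin() swaps above (illustrative, assuming
 * TCB_SIZE = 128): the memory window returns the TCB with its 16-byte
 * chunks in the opposite order from a CPL_GET_TCB_RPL payload, so the
 * loop exchanges chunk 0 (bytes 0-15) with chunk 7 (bytes 112-127),
 * chunk 1 with chunk 6, and so on until i and j meet, leaving 'buf' in
 * the format the GET_TCB_FIELD() accessors expect.
 */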
/*
 * Called by the kernel to allow the TOE driver to "refine" values filled up in
 * the tcp_info for an offloaded connection.
 */
static void
t4_tcp_info(struct toedev *tod, struct tcpcb *tp, struct tcp_info *ti)
{
        struct adapter *sc = tod->tod_softc;
        struct toepcb *toep = tp->t_toe;

        INP_WLOCK_ASSERT(tptoinpcb(tp));
        MPASS(ti != NULL);

        fill_tcp_info(sc, toep->tid, ti);
}

#ifdef KERN_TLS
static int
t4_alloc_tls_session(struct toedev *tod, struct tcpcb *tp,
    struct ktls_session *tls, int direction)
{
        struct toepcb *toep = tp->t_toe;

        INP_WLOCK_ASSERT(tptoinpcb(tp));
        MPASS(tls != NULL);

        return (tls_alloc_ktls(toep, tls, direction));
}
#endif

/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

static void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
    uint64_t val, uint32_t tid)
{
        struct ulptx_idata *ulpsc;
        struct cpl_set_tcb_field_core *req;

        ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
        ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

        ulpsc = (struct ulptx_idata *)(ulpmc + 1);
        ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        ulpsc->len = htobe32(sizeof(*req));

        req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
        OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply_ctrl = htobe16(V_NO_REPLY(1));
        req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
        req->mask = htobe64(mask);
        req->val = htobe64(val);

        ulpsc = (struct ulptx_idata *)(req + 1);
        if (LEN__SET_TCB_FIELD_ULP % 16) {
                ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
                ulpsc->len = htobe32(0);
                return (ulpsc + 1);
        }
        return (ulpsc);
}

static void
send_mss_flowc_wr(struct adapter *sc, struct toepcb *toep)
{
        struct wrq_cookie cookie;
        struct fw_flowc_wr *flowc;
        struct ofld_tx_sdesc *txsd;
        const int flowclen = sizeof(*flowc) + sizeof(struct fw_flowc_mnemval);
        const int flowclen16 = howmany(flowclen, 16);

        if (toep->tx_credits < flowclen16 || toep->txsd_avail == 0) {
                CH_ERR(sc, "%s: tid %u out of tx credits (%d, %d).\n",
                    __func__, toep->tid, toep->tx_credits, toep->txsd_avail);
                return;
        }

        flowc = start_wrq_wr(&toep->ofld_txq->wrq, flowclen16, &cookie);
        if (__predict_false(flowc == NULL)) {
                CH_ERR(sc, "ENOMEM in %s for tid %u.\n", __func__, toep->tid);
                return;
        }
        flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
            V_FW_FLOWC_WR_NPARAMS(1));
        flowc->flowid_len16 = htonl(V_FW_WR_LEN16(flowclen16) |
            V_FW_WR_FLOWID(toep->tid));
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[0].val = htobe32(toep->params.emss);

        txsd = &toep->txsd[toep->txsd_pidx];
        txsd->tx_credits = flowclen16;
        txsd->plen = 0;
        toep->tx_credits -= txsd->tx_credits;
        if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
                toep->txsd_pidx = 0;
        toep->txsd_avail--;
        commit_wrq_wr(&toep->ofld_txq->wrq, flowc, &cookie);
}
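/*
 * Layout of the compound work request built by t4_pmtu_update() below
 * (illustrative sketch): a single work_request_hdr followed by two
 * ULP_TX_PKT sub-commands, each carrying an immediate CPL_SET_TCB_FIELD.
 * When LEN__SET_TCB_FIELD_ULP is not a multiple of 16, each sub-command
 * is padded to a 16-byte boundary with a ULP_TX_SC_NOOP, which is what
 * the tail of mk_set_tcb_field_ulp() above emits.
 *
 *   +-------------------+
 *   | work_request_hdr  |  INIT_ULPTX_WRH(..., 1, 0) -> atomic
 *   +-------------------+
 *   | ulp_txpkt + idata |  SET_TCB_FIELD: T_MAXSEG = new mtu_idx
 *   | (+ noop padding)  |
 *   +-------------------+
 *   | ulp_txpkt + idata |  SET_TCB_FIELD: TIMESTAMP (forces a rexmt)
 *   | (+ noop padding)  |
 *   +-------------------+
 */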
static void
t4_pmtu_update(struct toedev *tod, struct tcpcb *tp, tcp_seq seq, int mtu)
{
        struct work_request_hdr *wrh;
        struct ulp_txpkt *ulpmc;
        int idx, len;
        struct wrq_cookie cookie;
        struct inpcb *inp = tptoinpcb(tp);
        struct toepcb *toep = tp->t_toe;
        struct adapter *sc = td_adapter(toep->td);
        unsigned short *mtus = &sc->params.mtus[0];

        INP_WLOCK_ASSERT(inp);
        MPASS(mtu > 0); /* kernel is supposed to provide something usable. */

        /* tp->snd_una and snd_max are in host byte order too. */
        seq = be32toh(seq);

        CTR6(KTR_CXGBE, "%s: tid %d, seq 0x%08x, mtu %u, mtu_idx %u (%d)",
            __func__, toep->tid, seq, mtu, toep->params.mtu_idx,
            mtus[toep->params.mtu_idx]);

        if (ulp_mode(toep) == ULP_MODE_NONE &&  /* XXX: Read TCB otherwise? */
            (SEQ_LT(seq, tp->snd_una) || SEQ_GEQ(seq, tp->snd_max))) {
                CTR5(KTR_CXGBE,
                    "%s: tid %d, seq 0x%08x not in range [0x%08x, 0x%08x).",
                    __func__, toep->tid, seq, tp->snd_una, tp->snd_max);
                return;
        }

        /* Find the best mtu_idx for the suggested MTU. */
        for (idx = 0; idx < NMTUS - 1 && mtus[idx + 1] <= mtu; idx++)
                continue;
        if (idx >= toep->params.mtu_idx)
                return; /* Never increase the PMTU (just like the kernel). */

        /*
         * We'll send a compound work request with 2 SET_TCB_FIELDs -- the
         * first one updates the mtu_idx and the second one triggers a
         * retransmit.
         */
        len = sizeof(*wrh) + 2 * roundup2(LEN__SET_TCB_FIELD_ULP, 16);
        wrh = start_wrq_wr(toep->ctrlq, howmany(len, 16), &cookie);
        if (wrh == NULL) {
                CH_ERR(sc, "failed to change mtu_idx of tid %d (%u -> %u).\n",
                    toep->tid, toep->params.mtu_idx, idx);
                return;
        }
        INIT_ULPTX_WRH(wrh, len, 1, 0); /* atomic */
        ulpmc = (struct ulp_txpkt *)(wrh + 1);
        ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_T_MAXSEG,
            V_TCB_T_MAXSEG(M_TCB_T_MAXSEG), V_TCB_T_MAXSEG(idx), toep->tid);
        ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_TIMESTAMP,
            V_TCB_TIMESTAMP(0x7FFFFULL << 11), 0, toep->tid);
        commit_wrq_wr(toep->ctrlq, wrh, &cookie);

        /* Update the software toepcb and tcpcb. */
        toep->params.mtu_idx = idx;
        tp->t_maxseg = mtus[toep->params.mtu_idx];
        if (inp->inp_inc.inc_flags & INC_ISIPV6)
                tp->t_maxseg -= sizeof(struct ip6_hdr) +
                    sizeof(struct tcphdr);
        else
                tp->t_maxseg -= sizeof(struct ip) + sizeof(struct tcphdr);
        toep->params.emss = tp->t_maxseg;
        if (tp->t_flags & TF_RCVD_TSTMP)
                toep->params.emss -= TCPOLEN_TSTAMP_APPA;

        /* Update the firmware flowc. */
        send_mss_flowc_wr(sc, toep);

        /* Update the MTU in the kernel's hostcache. */
        if (sc->tt.update_hc_on_pmtu_change != 0) {
                struct in_conninfo inc = {0};

                inc.inc_fibnum = inp->inp_inc.inc_fibnum;
                if (inp->inp_inc.inc_flags & INC_ISIPV6) {
                        inc.inc_flags |= INC_ISIPV6;
                        inc.inc6_faddr = inp->inp_inc.inc6_faddr;
                } else {
                        inc.inc_faddr = inp->inp_inc.inc_faddr;
                }
                tcp_hc_updatemtu(&inc, mtu);
        }

        CTR6(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u), t_maxseg %u, emss %u",
            __func__, toep->tid, toep->params.mtu_idx,
            mtus[toep->params.mtu_idx], tp->t_maxseg, toep->params.emss);
}
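/*
 * Worked example for t4_pmtu_update() above (numbers illustrative): if
 * the kernel suggests mtu = 1400 for an IPv4 connection and the MTU
 * table holds ..., 1360, 1400, 1500, ..., the scan settles on the
 * largest entry <= 1400.  Provided that index is below the current
 * mtu_idx (the PMTU is never increased), t_maxseg becomes 1400 -
 * sizeof(struct ip) - sizeof(struct tcphdr) = 1360, and emss drops by
 * TCPOLEN_TSTAMP_APPA (12) more if timestamps were negotiated.
 */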
/*
 * The TOE driver will not receive any more CPLs for the tid associated with
 * the toepcb; release the hold on the inpcb.
 */
void
final_cpl_received(struct toepcb *toep)
{
        struct inpcb *inp = toep->inp;
        bool need_wakeup;

        KASSERT(inp != NULL, ("%s: inp is NULL", __func__));
        INP_WLOCK_ASSERT(inp);
        KASSERT(toep->flags & TPF_CPL_PENDING,
            ("%s: CPL not pending already?", __func__));

        CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)",
            __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags);

        if (ulp_mode(toep) == ULP_MODE_TCPDDP)
                release_ddp_resources(toep);
        toep->inp = NULL;
        need_wakeup = (toep->flags & TPF_WAITING_FOR_FINAL) != 0;
        toep->flags &= ~(TPF_CPL_PENDING | TPF_WAITING_FOR_FINAL);
        mbufq_drain(&toep->ulp_pduq);
        mbufq_drain(&toep->ulp_pdu_reclaimq);

        if (!(toep->flags & TPF_ATTACHED))
                release_offload_resources(toep);

        if (!in_pcbrele_wlocked(inp))
                INP_WUNLOCK(inp);

        if (need_wakeup) {
                struct mtx *lock = mtx_pool_find(mtxpool_sleep, toep);

                mtx_lock(lock);
                wakeup(toep);
                mtx_unlock(lock);
        }
}

void
insert_tid(struct adapter *sc, int tid, void *ctx, int ntids)
{
        struct tid_info *t = &sc->tids;

        MPASS(tid >= t->tid_base);
        MPASS(tid - t->tid_base < t->ntids);

        t->tid_tab[tid - t->tid_base] = ctx;
        atomic_add_int(&t->tids_in_use, ntids);
}

void *
lookup_tid(struct adapter *sc, int tid)
{
        struct tid_info *t = &sc->tids;

        return (t->tid_tab[tid - t->tid_base]);
}

void
update_tid(struct adapter *sc, int tid, void *ctx)
{
        struct tid_info *t = &sc->tids;

        t->tid_tab[tid - t->tid_base] = ctx;
}

void
remove_tid(struct adapter *sc, int tid, int ntids)
{
        struct tid_info *t = &sc->tids;

        t->tid_tab[tid - t->tid_base] = NULL;
        atomic_subtract_int(&t->tids_in_use, ntids);
}

/*
 * What mtu_idx to use, given a 4-tuple.  Note that both s->mss and tcp_mssopt
 * have the MSS that we should advertise in our SYN.  Advertised MSS doesn't
 * account for any TCP options so the effective MSS (only payload, no headers
 * or options) could be different.
 */
static int
find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc,
    struct offload_settings *s)
{
        unsigned short *mtus = &sc->params.mtus[0];
        int i, mss, mtu;

        MPASS(inc != NULL);

        mss = s->mss > 0 ? s->mss : tcp_mssopt(inc);
        if (inc->inc_flags & INC_ISIPV6)
                mtu = mss + sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
        else
                mtu = mss + sizeof(struct ip) + sizeof(struct tcphdr);

        for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mtu; i++)
                continue;

        return (i);
}
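/*
 * Typical tid table usage (illustrative): an offloaded connection that
 * also holds a CLIP entry occupies two tids, so its teardown calls
 * remove_tid(sc, tid, 2), matching the count used when the tid was
 * inserted; see the toep->ce ? 2 : 1 argument in
 * release_offload_resources() earlier in this file.  lookup_tid() is how
 * the CPL handlers map a hardware tid back to its toepcb or listen
 * context.
 */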
/*
 * Determine the receive window size for a socket.
 */
u_long
select_rcv_wnd(struct socket *so)
{
        unsigned long wnd;

        SOCKBUF_LOCK_ASSERT(&so->so_rcv);

        wnd = sbspace(&so->so_rcv);
        if (wnd < MIN_RCV_WND)
                wnd = MIN_RCV_WND;

        return (min(wnd, MAX_RCV_WND));
}

int
select_rcv_wscale(void)
{
        int wscale = 0;
        unsigned long space = sb_max;

        if (space > MAX_RCV_WND)
                space = MAX_RCV_WND;

        while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space)
                wscale++;

        return (wscale);
}
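/*
 * Example of the window scale computation above: with space = 2MB
 * (2097152 bytes), the loop keeps shifting because TCP_MAXWIN << 5 =
 * 2097120 still falls 32 bytes short of the space, so it settles on
 * wscale = 6.  In general the result is the smallest shift that lets a
 * 16-bit window field cover the buffer, capped at TCP_MAX_WINSHIFT.
 */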
__be64
calc_options0(struct vi_info *vi, struct conn_params *cp)
{
        uint64_t opt0 = 0;

        opt0 |= F_TCAM_BYPASS;

        MPASS(cp->wscale >= 0 && cp->wscale <= M_WND_SCALE);
        opt0 |= V_WND_SCALE(cp->wscale);

        MPASS(cp->mtu_idx >= 0 && cp->mtu_idx < NMTUS);
        opt0 |= V_MSS_IDX(cp->mtu_idx);

        MPASS(cp->ulp_mode >= 0 && cp->ulp_mode <= M_ULP_MODE);
        opt0 |= V_ULP_MODE(cp->ulp_mode);

        MPASS(cp->opt0_bufsize >= 0 && cp->opt0_bufsize <= M_RCV_BUFSIZ);
        opt0 |= V_RCV_BUFSIZ(cp->opt0_bufsize);

        MPASS(cp->l2t_idx >= 0 && cp->l2t_idx < vi->adapter->vres.l2t.size);
        opt0 |= V_L2T_IDX(cp->l2t_idx);

        opt0 |= V_SMAC_SEL(vi->smt_idx);
        opt0 |= V_TX_CHAN(vi->pi->tx_chan);

        MPASS(cp->keepalive == 0 || cp->keepalive == 1);
        opt0 |= V_KEEP_ALIVE(cp->keepalive);

        MPASS(cp->nagle == 0 || cp->nagle == 1);
        opt0 |= V_NAGLE(cp->nagle);

        return (htobe64(opt0));
}

__be32
calc_options2(struct vi_info *vi, struct conn_params *cp)
{
        uint32_t opt2 = 0;
        struct port_info *pi = vi->pi;
        struct adapter *sc = pi->adapter;

        /*
         * rx flow control, rx coalesce, congestion control, and tx pace are
         * all explicitly set by the driver.  On T5+ the ISS is also set by
         * the driver to the value picked by the kernel.
         */
        if (is_t4(sc)) {
                opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID;
                opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID;
        } else {
                opt2 |= F_T5_OPT_2_VALID;       /* all 4 valid */
                opt2 |= F_T5_ISS;               /* ISS provided in CPL */
        }

        MPASS(cp->sack == 0 || cp->sack == 1);
        opt2 |= V_SACK_EN(cp->sack);

        MPASS(cp->tstamp == 0 || cp->tstamp == 1);
        opt2 |= V_TSTAMPS_EN(cp->tstamp);

        if (cp->wscale > 0)
                opt2 |= F_WND_SCALE_EN;

        MPASS(cp->ecn == 0 || cp->ecn == 1);
        opt2 |= V_CCTRL_ECN(cp->ecn);

        /* XXX: F_RX_CHANNEL for multiple rx c-chan support goes here. */

        opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]);
        opt2 |= V_PACE(0);
        opt2 |= F_RSS_QUEUE_VALID;
        opt2 |= V_RSS_QUEUE(sc->sge.ofld_rxq[cp->rxq_idx].iq.abs_id);

        MPASS(cp->cong_algo >= 0 && cp->cong_algo <= M_CONG_CNTRL);
        opt2 |= V_CONG_CNTRL(cp->cong_algo);

        MPASS(cp->rx_coalesce == 0 || cp->rx_coalesce == 1);
        if (cp->rx_coalesce == 1)
                opt2 |= V_RX_COALESCE(M_RX_COALESCE);

        opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0);
#ifdef USE_DDP_RX_FLOW_CONTROL
        if (cp->ulp_mode == ULP_MODE_TCPDDP)
                opt2 |= F_RX_FC_DDP;
#endif

        return (htobe32(opt2));
}

uint64_t
select_ntuple(struct vi_info *vi, struct l2t_entry *e)
{
        struct adapter *sc = vi->adapter;
        struct tp_params *tp = &sc->params.tp;
        uint64_t ntuple = 0;

        /*
         * Initialize each of the fields which we care about which are present
         * in the Compressed Filter Tuple.
         */
        if (tp->vlan_shift >= 0 && EVL_VLANOFTAG(e->vlan) != CPL_L2T_VLAN_NONE)
                ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) <<
                    tp->vlan_shift;

        if (tp->port_shift >= 0)
                ntuple |= (uint64_t)e->lport << tp->port_shift;

        if (tp->protocol_shift >= 0)
                ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;

        if (tp->vnic_shift >= 0 && tp->vnic_mode == FW_VNIC_MODE_PF_VF) {
                ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vi->vin) |
                    V_FT_VNID_ID_PF(sc->pf) | V_FT_VNID_ID_VLD(vi->vfvld)) <<
                    tp->vnic_shift;
        }

        if (is_t4(sc))
                return (htobe32((uint32_t)ntuple));
        else
                return (htobe64(V_FILTER_TUPLE(ntuple)));
}
/*
 * Initialize various connection parameters.
 */
void
init_conn_params(struct vi_info *vi, struct offload_settings *s,
    struct in_conninfo *inc, struct socket *so,
    const struct tcp_options *tcpopt, int16_t l2t_idx, struct conn_params *cp)
{
        struct port_info *pi = vi->pi;
        struct adapter *sc = pi->adapter;
        struct tom_tunables *tt = &sc->tt;
        struct inpcb *inp = sotoinpcb(so);
        struct tcpcb *tp = intotcpcb(inp);
        u_long wnd;
        u_int q_idx;

        MPASS(s->offload != 0);

        /* Congestion control algorithm */
        if (s->cong_algo >= 0)
                cp->cong_algo = s->cong_algo & M_CONG_CNTRL;
        else if (sc->tt.cong_algorithm >= 0)
                cp->cong_algo = tt->cong_algorithm & M_CONG_CNTRL;
        else {
                struct cc_algo *cc = CC_ALGO(tp);

                if (strcasecmp(cc->name, "reno") == 0)
                        cp->cong_algo = CONG_ALG_RENO;
                else if (strcasecmp(cc->name, "tahoe") == 0)
                        cp->cong_algo = CONG_ALG_TAHOE;
                else if (strcasecmp(cc->name, "newreno") == 0)
                        cp->cong_algo = CONG_ALG_NEWRENO;
                else if (strcasecmp(cc->name, "highspeed") == 0)
                        cp->cong_algo = CONG_ALG_HIGHSPEED;
                else {
                        /*
                         * Use newreno in case the algorithm selected by the
                         * host stack is not supported by the hardware.
                         */
                        cp->cong_algo = CONG_ALG_NEWRENO;
                }
        }

        /* Tx traffic scheduling class. */
        if (s->sched_class >= 0 && s->sched_class < sc->params.nsched_cls)
                cp->tc_idx = s->sched_class;
        else
                cp->tc_idx = -1;

        /* Nagle's algorithm. */
        if (s->nagle >= 0)
                cp->nagle = s->nagle > 0 ? 1 : 0;
        else
                cp->nagle = tp->t_flags & TF_NODELAY ? 0 : 1;

        /* TCP Keepalive. */
        if (V_tcp_always_keepalive || so_options_get(so) & SO_KEEPALIVE)
                cp->keepalive = 1;
        else
                cp->keepalive = 0;

        /* Optimization that's specific to T5 @ 40G. */
        if (tt->tx_align >= 0)
                cp->tx_align = tt->tx_align > 0 ? 1 : 0;
        else if (chip_id(sc) == CHELSIO_T5 &&
            (port_top_speed(pi) > 10 || sc->params.nports > 2))
                cp->tx_align = 1;
        else
                cp->tx_align = 0;

        /* ULP mode. */
        if (s->ddp > 0 ||
            (s->ddp < 0 && sc->tt.ddp && (so_options_get(so) & SO_NO_DDP) == 0))
                cp->ulp_mode = ULP_MODE_TCPDDP;
        else
                cp->ulp_mode = ULP_MODE_NONE;

        /* Rx coalescing. */
        if (s->rx_coalesce >= 0)
                cp->rx_coalesce = s->rx_coalesce > 0 ? 1 : 0;
        else if (tt->rx_coalesce >= 0)
                cp->rx_coalesce = tt->rx_coalesce > 0 ? 1 : 0;
        else
                cp->rx_coalesce = 1;    /* default */

        /*
         * Index in the PMTU table.  This controls the MSS that we announce in
         * our SYN initially, but after ESTABLISHED it controls the MSS that
         * we use to send data.
         */
        cp->mtu_idx = find_best_mtu_idx(sc, inc, s);

        /* Tx queue for this connection. */
        if (s->txq == QUEUE_RANDOM)
                q_idx = arc4random();
        else if (s->txq == QUEUE_ROUNDROBIN)
                q_idx = atomic_fetchadd_int(&vi->txq_rr, 1);
        else
                q_idx = s->txq;
        cp->txq_idx = vi->first_ofld_txq + q_idx % vi->nofldtxq;

        /* Rx queue for this connection. */
        if (s->rxq == QUEUE_RANDOM)
                q_idx = arc4random();
        else if (s->rxq == QUEUE_ROUNDROBIN)
                q_idx = atomic_fetchadd_int(&vi->rxq_rr, 1);
        else
                q_idx = s->rxq;
        cp->rxq_idx = vi->first_ofld_rxq + q_idx % vi->nofldrxq;

        if (SOLISTENING(so)) {
                /* Passive open */
                MPASS(tcpopt != NULL);

                /* TCP timestamp option */
                if (tcpopt->tstamp &&
                    (s->tstamp > 0 || (s->tstamp < 0 && V_tcp_do_rfc1323)))
                        cp->tstamp = 1;
                else
                        cp->tstamp = 0;

                /* SACK */
                if (tcpopt->sack &&
                    (s->sack > 0 || (s->sack < 0 && V_tcp_do_sack)))
                        cp->sack = 1;
                else
                        cp->sack = 0;

                /* Receive window scaling. */
                if (tcpopt->wsf > 0 && tcpopt->wsf < 15 && V_tcp_do_rfc1323)
                        cp->wscale = select_rcv_wscale();
                else
                        cp->wscale = 0;

                /* ECN */
                if (tcpopt->ecn &&      /* XXX: review. */
                    (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn)))
                        cp->ecn = 1;
                else
                        cp->ecn = 0;

                wnd = max(so->sol_sbrcv_hiwat, MIN_RCV_WND);
                cp->opt0_bufsize = min(wnd >> 10, M_RCV_BUFSIZ);

                if (tt->sndbuf > 0)
                        cp->sndbuf = tt->sndbuf;
                else if (so->sol_sbsnd_flags & SB_AUTOSIZE &&
                    V_tcp_do_autosndbuf)
                        cp->sndbuf = 256 * 1024;
                else
                        cp->sndbuf = so->sol_sbsnd_hiwat;
        } else {
                /* Active open */

                /* TCP timestamp option */
                if (s->tstamp > 0 ||
                    (s->tstamp < 0 && (tp->t_flags & TF_REQ_TSTMP)))
                        cp->tstamp = 1;
                else
                        cp->tstamp = 0;

                /* SACK */
                if (s->sack > 0 ||
                    (s->sack < 0 && (tp->t_flags & TF_SACK_PERMIT)))
                        cp->sack = 1;
                else
                        cp->sack = 0;

                /* Receive window scaling */
                if (tp->t_flags & TF_REQ_SCALE)
                        cp->wscale = select_rcv_wscale();
                else
                        cp->wscale = 0;

                /* ECN */
                if (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn == 1))
                        cp->ecn = 1;
                else
                        cp->ecn = 0;

                SOCKBUF_LOCK(&so->so_rcv);
                wnd = max(select_rcv_wnd(so), MIN_RCV_WND);
                SOCKBUF_UNLOCK(&so->so_rcv);
                cp->opt0_bufsize = min(wnd >> 10, M_RCV_BUFSIZ);

                if (tt->sndbuf > 0)
                        cp->sndbuf = tt->sndbuf;
                else {
                        SOCKBUF_LOCK(&so->so_snd);
                        if (so->so_snd.sb_flags & SB_AUTOSIZE &&
                            V_tcp_do_autosndbuf)
                                cp->sndbuf = 256 * 1024;
                        else
                                cp->sndbuf = so->so_snd.sb_hiwat;
                        SOCKBUF_UNLOCK(&so->so_snd);
                }
        }

        cp->l2t_idx = l2t_idx;

        /* This will be initialized on ESTABLISHED. */
        cp->emss = 0;
}
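/*
 * Example of the queue selection in init_conn_params() above (numbers
 * illustrative): with nofldtxq = 8, a policy of QUEUE_RANDOM spreads new
 * connections as first_ofld_txq + (arc4random() % 8), QUEUE_ROUNDROBIN
 * cycles through the eight queues via the atomically incremented
 * vi->txq_rr counter, and an explicit s->txq pins every connection that
 * matches the rule to a single queue.
 */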
int
negative_advice(int status)
{

        return (status == CPL_ERR_RTX_NEG_ADVICE ||
            status == CPL_ERR_PERSIST_NEG_ADVICE ||
            status == CPL_ERR_KEEPALV_NEG_ADVICE);
}

static int
alloc_tid_tab(struct tid_info *t, int flags)
{

        MPASS(t->ntids > 0);
        MPASS(t->tid_tab == NULL);

        t->tid_tab = malloc(t->ntids * sizeof(*t->tid_tab), M_CXGBE,
            M_ZERO | flags);
        if (t->tid_tab == NULL)
                return (ENOMEM);
        atomic_store_rel_int(&t->tids_in_use, 0);

        return (0);
}

static void
free_tid_tab(struct tid_info *t)
{

        KASSERT(t->tids_in_use == 0,
            ("%s: %d tids still in use.", __func__, t->tids_in_use));

        free(t->tid_tab, M_CXGBE);
        t->tid_tab = NULL;
}

static int
alloc_stid_tab(struct tid_info *t, int flags)
{

        MPASS(t->nstids > 0);
        MPASS(t->stid_tab == NULL);

        t->stid_tab = malloc(t->nstids * sizeof(*t->stid_tab), M_CXGBE,
            M_ZERO | flags);
        if (t->stid_tab == NULL)
                return (ENOMEM);
        mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF);
        t->stids_in_use = 0;
        TAILQ_INIT(&t->stids);
        t->nstids_free_head = t->nstids;

        return (0);
}

static void
free_stid_tab(struct tid_info *t)
{

        KASSERT(t->stids_in_use == 0,
            ("%s: %d tids still in use.", __func__, t->stids_in_use));

        if (mtx_initialized(&t->stid_lock))
                mtx_destroy(&t->stid_lock);
        free(t->stid_tab, M_CXGBE);
        t->stid_tab = NULL;
}

static void
free_tid_tabs(struct tid_info *t)
{

        free_tid_tab(t);
        free_stid_tab(t);
}

static int
alloc_tid_tabs(struct tid_info *t)
{
        int rc;

        rc = alloc_tid_tab(t, M_NOWAIT);
        if (rc != 0)
                goto failed;

        rc = alloc_stid_tab(t, M_NOWAIT);
        if (rc != 0)
                goto failed;

        return (0);
failed:
        free_tid_tabs(t);
        return (rc);
}
static inline void
alloc_tcb_history(struct adapter *sc, struct tom_data *td)
{

        if (sc->tids.ntids == 0 || sc->tids.ntids > 1024)
                return;
        rw_init(&td->tcb_history_lock, "TCB history");
        td->tcb_history = malloc(sc->tids.ntids * sizeof(*td->tcb_history),
            M_CXGBE, M_ZERO | M_NOWAIT);
        td->dupack_threshold = G_DUPACKTHRESH(t4_read_reg(sc, A_TP_PARA_REG0));
}

static inline void
free_tcb_history(struct adapter *sc, struct tom_data *td)
{
#ifdef INVARIANTS
        int i;

        if (td->tcb_history != NULL) {
                for (i = 0; i < sc->tids.ntids; i++) {
                        MPASS(td->tcb_history[i] == NULL);
                }
        }
#endif
        free(td->tcb_history, M_CXGBE);
        if (rw_initialized(&td->tcb_history_lock))
                rw_destroy(&td->tcb_history_lock);
}

static void
free_tom_data(struct adapter *sc, struct tom_data *td)
{

        ASSERT_SYNCHRONIZED_OP(sc);

        KASSERT(TAILQ_EMPTY(&td->toep_list),
            ("%s: TOE PCB list is not empty.", __func__));
        KASSERT(td->lctx_count == 0,
            ("%s: lctx hash table is not empty.", __func__));

        t4_free_ppod_region(&td->pr);

        if (td->listen_mask != 0)
                hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);

        if (mtx_initialized(&td->unsent_wr_lock))
                mtx_destroy(&td->unsent_wr_lock);
        if (mtx_initialized(&td->lctx_hash_lock))
                mtx_destroy(&td->lctx_hash_lock);
        if (mtx_initialized(&td->toep_list_lock))
                mtx_destroy(&td->toep_list_lock);

        free_tcb_history(sc, td);
        free_tid_tabs(&sc->tids);
        free(td, M_CXGBE);
}
static char *
prepare_pkt(int open_type, uint16_t vtag, struct inpcb *inp, int *pktlen,
    int *buflen)
{
        char *pkt;
        struct tcphdr *th;
        int ipv6, len;
        const int maxlen =
            max(sizeof(struct ether_header),
                sizeof(struct ether_vlan_header)) +
            max(sizeof(struct ip), sizeof(struct ip6_hdr)) +
            sizeof(struct tcphdr);

        MPASS(open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN);

        pkt = malloc(maxlen, M_CXGBE, M_ZERO | M_NOWAIT);
        if (pkt == NULL)
                return (NULL);

        ipv6 = inp->inp_vflag & INP_IPV6;
        len = 0;

        if (EVL_VLANOFTAG(vtag) == 0xfff) {
                struct ether_header *eh = (void *)pkt;

                if (ipv6)
                        eh->ether_type = htons(ETHERTYPE_IPV6);
                else
                        eh->ether_type = htons(ETHERTYPE_IP);

                len += sizeof(*eh);
        } else {
                struct ether_vlan_header *evh = (void *)pkt;

                evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
                evh->evl_tag = htons(vtag);
                if (ipv6)
                        evh->evl_proto = htons(ETHERTYPE_IPV6);
                else
                        evh->evl_proto = htons(ETHERTYPE_IP);

                len += sizeof(*evh);
        }

        if (ipv6) {
                struct ip6_hdr *ip6 = (void *)&pkt[len];

                ip6->ip6_vfc = IPV6_VERSION;
                ip6->ip6_plen = htons(sizeof(struct tcphdr));
                ip6->ip6_nxt = IPPROTO_TCP;
                if (open_type == OPEN_TYPE_ACTIVE) {
                        ip6->ip6_src = inp->in6p_laddr;
                        ip6->ip6_dst = inp->in6p_faddr;
                } else if (open_type == OPEN_TYPE_LISTEN) {
                        ip6->ip6_src = inp->in6p_laddr;
                        ip6->ip6_dst = ip6->ip6_src;
                }

                len += sizeof(*ip6);
        } else {
                struct ip *ip = (void *)&pkt[len];

                ip->ip_v = IPVERSION;
                ip->ip_hl = sizeof(*ip) >> 2;
                ip->ip_tos = inp->inp_ip_tos;
                ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr));
                ip->ip_ttl = inp->inp_ip_ttl;
                ip->ip_p = IPPROTO_TCP;
                if (open_type == OPEN_TYPE_ACTIVE) {
                        ip->ip_src = inp->inp_laddr;
                        ip->ip_dst = inp->inp_faddr;
                } else if (open_type == OPEN_TYPE_LISTEN) {
                        ip->ip_src = inp->inp_laddr;
                        ip->ip_dst = ip->ip_src;
                }

                len += sizeof(*ip);
        }

        th = (void *)&pkt[len];
        if (open_type == OPEN_TYPE_ACTIVE) {
                th->th_sport = inp->inp_lport;  /* network byte order already */
                th->th_dport = inp->inp_fport;  /* ditto */
        } else if (open_type == OPEN_TYPE_LISTEN) {
                th->th_sport = inp->inp_lport;  /* network byte order already */
                th->th_dport = th->th_sport;
        }
        len += sizeof(*th);

        *pktlen = *buflen = len;
        return (pkt);
}

const struct offload_settings *
lookup_offload_policy(struct adapter *sc, int open_type, struct mbuf *m,
    uint16_t vtag, struct inpcb *inp)
{
        const struct t4_offload_policy *op;
        char *pkt;
        struct offload_rule *r;
        int i, matched, pktlen, buflen;
        static const struct offload_settings allow_offloading_settings = {
                .offload = 1,
                .rx_coalesce = -1,
                .cong_algo = -1,
                .sched_class = -1,
                .tstamp = -1,
                .sack = -1,
                .nagle = -1,
                .ecn = -1,
                .ddp = -1,
                .tls = -1,
                .txq = QUEUE_RANDOM,
                .rxq = QUEUE_RANDOM,
                .mss = -1,
        };
        static const struct offload_settings disallow_offloading_settings = {
                .offload = 0,
                /* rest is irrelevant when offload is off. */
        };

        rw_assert(&sc->policy_lock, RA_LOCKED);

        /*
         * If there's no Connection Offloading Policy attached to the device
         * then we need to return a default static policy.  If
         * "cop_managed_offloading" is true, then we need to disallow
         * offloading until a COP is attached to the device.  Otherwise we
         * allow offloading ...
         */
        op = sc->policy;
        if (op == NULL) {
                if (sc->tt.cop_managed_offloading)
                        return (&disallow_offloading_settings);
                else
                        return (&allow_offloading_settings);
        }

        switch (open_type) {
        case OPEN_TYPE_ACTIVE:
        case OPEN_TYPE_LISTEN:
                pkt = prepare_pkt(open_type, vtag, inp, &pktlen, &buflen);
                break;
        case OPEN_TYPE_PASSIVE:
                MPASS(m != NULL);
                pkt = mtod(m, char *);
                MPASS(*pkt == CPL_PASS_ACCEPT_REQ);
                pkt += sizeof(struct cpl_pass_accept_req);
                pktlen = m->m_pkthdr.len - sizeof(struct cpl_pass_accept_req);
                buflen = m->m_len - sizeof(struct cpl_pass_accept_req);
                break;
        default:
                MPASS(0);
                return (&disallow_offloading_settings);
        }

        if (pkt == NULL || pktlen == 0 || buflen == 0)
                return (&disallow_offloading_settings);

        matched = 0;
        r = &op->rule[0];
        for (i = 0; i < op->nrules; i++, r++) {
                if (r->open_type != open_type &&
                    r->open_type != OPEN_TYPE_DONTCARE) {
                        continue;
                }
                matched = bpf_filter(r->bpf_prog.bf_insns, pkt, pktlen,
                    buflen);
                if (matched)
                        break;
        }

        if (open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN)
                free(pkt, M_CXGBE);

        return (matched ? &r->settings : &disallow_offloading_settings);
}
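/*
 * How a COP rule list is evaluated above (illustrative): each rule's
 * compiled BPF program runs against a synthesized Ethernet/IP/TCP header
 * from prepare_pkt() (active and listen opens) or against the frame
 * embedded in the CPL_PASS_ACCEPT_REQ (passive opens).  The first rule
 * whose open_type matches (or is OPEN_TYPE_DONTCARE) and whose filter
 * returns nonzero supplies the settings; if nothing matches, the
 * connection is not offloaded.
 */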
static void
reclaim_wr_resources(void *arg, int count)
{
        struct tom_data *td = arg;
        STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list);
        struct cpl_act_open_req *cpl;
        u_int opcode, atid, tid;
        struct wrqe *wr;
        struct adapter *sc = td_adapter(td);

        mtx_lock(&td->unsent_wr_lock);
        STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
        mtx_unlock(&td->unsent_wr_lock);

        while ((wr = STAILQ_FIRST(&twr_list)) != NULL) {
                STAILQ_REMOVE_HEAD(&twr_list, link);

                cpl = wrtod(wr);
                opcode = GET_OPCODE(cpl);

                switch (opcode) {
                case CPL_ACT_OPEN_REQ:
                case CPL_ACT_OPEN_REQ6:
                        atid = G_TID_TID(be32toh(OPCODE_TID(cpl)));
                        CTR2(KTR_CXGBE, "%s: atid %u ", __func__, atid);
                        act_open_failure_cleanup(sc, atid, EHOSTUNREACH);
                        free(wr, M_CXGBE);
                        break;
                case CPL_PASS_ACCEPT_RPL:
                        tid = GET_TID(cpl);
                        CTR2(KTR_CXGBE, "%s: tid %u ", __func__, tid);
                        synack_failure_cleanup(sc, tid);
                        free(wr, M_CXGBE);
                        break;
                default:
                        log(LOG_ERR, "%s: leaked work request %p, wr_len %d, "
                            "opcode %x\n", __func__, wr, wr->wr_len, opcode);
                        /* WR not freed here; go look at it with a debugger. */
                }
        }
}

/*
 * Ground control to Major TOM
 * Commencing countdown, engines on
 */
static int
t4_tom_activate(struct adapter *sc)
{
        struct tom_data *td;
        struct toedev *tod;
        struct vi_info *vi;
        int i, rc, v;

        ASSERT_SYNCHRONIZED_OP(sc);

        /* per-adapter softc for TOM */
        td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT);
        if (td == NULL)
                return (ENOMEM);

        /* List of TOE PCBs and associated lock */
        mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
        TAILQ_INIT(&td->toep_list);

        /* Listen context */
        mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
        td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
            &td->listen_mask, HASH_NOWAIT);

        /* List of WRs for which L2 resolution failed */
        mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF);
        STAILQ_INIT(&td->unsent_wr_list);
        TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td);

        /* TID tables */
        rc = alloc_tid_tabs(&sc->tids);
        if (rc != 0)
                goto done;

        rc = t4_init_ppod_region(&td->pr, &sc->vres.ddp,
            t4_read_reg(sc, A_ULP_RX_TDDP_PSZ), "TDDP page pods");
        if (rc != 0)
                goto done;
        t4_set_reg_field(sc, A_ULP_RX_TDDP_TAGMASK,
            V_TDDPTAGMASK(M_TDDPTAGMASK), td->pr.pr_tag_mask);

        alloc_tcb_history(sc, td);

        /* toedev ops */
        tod = &td->tod;
        init_toedev(tod);
        tod->tod_softc = sc;
        tod->tod_connect = t4_connect;
        tod->tod_listen_start = t4_listen_start;
        tod->tod_listen_stop = t4_listen_stop;
        tod->tod_rcvd = t4_rcvd;
        tod->tod_output = t4_tod_output;
        tod->tod_send_rst = t4_send_rst;
        tod->tod_send_fin = t4_send_fin;
        tod->tod_pcb_detach = t4_pcb_detach;
        tod->tod_l2_update = t4_l2_update;
        tod->tod_syncache_added = t4_syncache_added;
        tod->tod_syncache_removed = t4_syncache_removed;
        tod->tod_syncache_respond = t4_syncache_respond;
        tod->tod_offload_socket = t4_offload_socket;
        tod->tod_ctloutput = t4_ctloutput;
        tod->tod_tcp_info = t4_tcp_info;
#ifdef KERN_TLS
        tod->tod_alloc_tls_session = t4_alloc_tls_session;
#endif
        tod->tod_pmtu_update = t4_pmtu_update;

        for_each_port(sc, i) {
                for_each_vi(sc->port[i], v, vi) {
                        SETTOEDEV(vi->ifp, &td->tod);
                }
        }

        sc->tom_softc = td;
        register_toedev(sc->tom_softc);

done:
        if (rc != 0)
                free_tom_data(sc, td);
        return (rc);
}
static int
t4_tom_deactivate(struct adapter *sc)
{
        int rc = 0;
        struct tom_data *td = sc->tom_softc;

        ASSERT_SYNCHRONIZED_OP(sc);

        if (td == NULL)
                return (0);     /* XXX. KASSERT? */

        if (sc->offload_map != 0)
                return (EBUSY); /* at least one port has IFCAP_TOE enabled */

        if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI))
                return (EBUSY); /* both iWARP and iSCSI rely on the TOE. */

        mtx_lock(&td->toep_list_lock);
        if (!TAILQ_EMPTY(&td->toep_list))
                rc = EBUSY;
        mtx_unlock(&td->toep_list_lock);

        mtx_lock(&td->lctx_hash_lock);
        if (td->lctx_count > 0)
                rc = EBUSY;
        mtx_unlock(&td->lctx_hash_lock);

        taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
        mtx_lock(&td->unsent_wr_lock);
        if (!STAILQ_EMPTY(&td->unsent_wr_list))
                rc = EBUSY;
        mtx_unlock(&td->unsent_wr_lock);

        if (rc == 0) {
                unregister_toedev(sc->tom_softc);
                free_tom_data(sc, td);
                sc->tom_softc = NULL;
        }

        return (rc);
}

static int
t4_aio_queue_tom(struct socket *so, struct kaiocb *job)
{
        struct tcpcb *tp = sototcpcb(so);
        struct toepcb *toep = tp->t_toe;
        int error;

        /*
         * No lock is needed as TOE sockets never change between
         * active and passive.
         */
        if (SOLISTENING(so))
                return (EINVAL);

        if (ulp_mode(toep) == ULP_MODE_TCPDDP) {
                error = t4_aio_queue_ddp(so, job);
                if (error != EOPNOTSUPP)
                        return (error);
        }

        return (t4_aio_queue_aiotx(so, job));
}

static int
t4_tom_mod_load(void)
{
        /* CPL handlers */
        t4_register_cpl_handler(CPL_GET_TCB_RPL, do_get_tcb_rpl);
        t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl2,
            CPL_COOKIE_TOM);
        t4_init_connect_cpl_handlers();
        t4_init_listen_cpl_handlers();
        t4_init_cpl_io_handlers();

        t4_ddp_mod_load();
        t4_tls_mod_load();

        bcopy(&tcp_protosw, &toe_protosw, sizeof(toe_protosw));
        toe_protosw.pr_aio_queue = t4_aio_queue_tom;

        bcopy(&tcp6_protosw, &toe6_protosw, sizeof(toe6_protosw));
        toe6_protosw.pr_aio_queue = t4_aio_queue_tom;

        return (t4_register_uld(&tom_uld_info));
}

static void
tom_uninit(struct adapter *sc, void *arg __unused)
{
        if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun"))
                return;

        /* Try to free resources (works only if no port has IFCAP_TOE) */
        if (uld_active(sc, ULD_TOM))
                t4_deactivate_uld(sc, ULD_TOM);

        end_synchronized_op(sc, 0);
}

static int
t4_tom_mod_unload(void)
{
        t4_iterate(tom_uninit, NULL);

        if (t4_unregister_uld(&tom_uld_info) == EBUSY)
                return (EBUSY);

        t4_tls_mod_unload();
        t4_ddp_mod_unload();

        t4_uninit_connect_cpl_handlers();
        t4_uninit_listen_cpl_handlers();
        t4_uninit_cpl_io_handlers();
        t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, NULL,
            CPL_COOKIE_TOM);
        t4_register_cpl_handler(CPL_GET_TCB_RPL, NULL);

        return (0);
}
#endif  /* TCP_OFFLOAD */

static int
t4_tom_modevent(module_t mod, int cmd, void *arg)
{
        int rc = 0;

#ifdef TCP_OFFLOAD
        switch (cmd) {
        case MOD_LOAD:
                rc = t4_tom_mod_load();
                break;

        case MOD_UNLOAD:
                rc = t4_tom_mod_unload();
                break;

        default:
                rc = EINVAL;
        }
#else
        printf("t4_tom: compiled without TCP_OFFLOAD support.\n");
        rc = EOPNOTSUPP;
#endif
        return (rc);
}

static moduledata_t t4_tom_moddata = {
        "t4_tom",
        t4_tom_modevent,
        0
};

MODULE_VERSION(t4_tom, 1);
MODULE_DEPEND(t4_tom, toecore, 1, 1, 1);
MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1);
DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY);