/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/refcount.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/scope6_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>
#include <netinet/cc/cc.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_clip.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"
#include "tom/t4_tls.h"

static struct protosw *tcp_protosw;
static struct protosw toe_protosw;
static struct pr_usrreqs toe_usrreqs;

static struct protosw *tcp6_protosw;
static struct protosw toe6_protosw;
static struct pr_usrreqs toe6_usrreqs;

/* Module ops */
static int t4_tom_mod_load(void);
static int t4_tom_mod_unload(void);
static int t4_tom_modevent(module_t, int, void *);

/* ULD ops and helpers */
static int t4_tom_activate(struct adapter *);
static int t4_tom_deactivate(struct adapter *);

static struct uld_info tom_uld_info = {
	.uld_id = ULD_TOM,
	.activate = t4_tom_activate,
	.deactivate = t4_tom_deactivate,
};

static void release_offload_resources(struct toepcb *);
static int alloc_tid_tabs(struct tid_info *);
static void free_tid_tabs(struct tid_info *);
static void free_tom_data(struct adapter *, struct tom_data *);
static void reclaim_wr_resources(void *, int);

struct toepcb *
alloc_toepcb(struct vi_info *vi, int flags)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct toepcb *toep;
	int tx_credits, txsd_total, len;

	/*
	 * The firmware counts tx work request credits in units of 16 bytes
	 * each.  Reserve room for an ABORT_REQ so the driver never has to
	 * worry about tx credits if it wants to abort a connection.
	 */
	tx_credits = sc->params.ofldq_wr_cred;
	tx_credits -= howmany(sizeof(struct cpl_abort_req), 16);

	/*
	 * Shortest possible tx work request is a fw_ofld_tx_data_wr + 1 byte
	 * of immediate payload, and the firmware counts tx work request
	 * credits in units of 16 bytes.  Calculate the maximum number of work
	 * requests possible.
	 */
	txsd_total = tx_credits /
	    howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16);
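	/*
	 * Illustrative numbers for the credit accounting above (assumed, not
	 * taken from hardware documentation): with ofldq_wr_cred = 512 and a
	 * 32-byte cpl_abort_req, tx_credits = 512 - howmany(32, 16) = 510;
	 * the smallest work request then costs
	 * howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16) credits, which
	 * is what bounds txsd_total.
	 */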
	len = offsetof(struct toepcb, txsd) +
	    txsd_total * sizeof(struct ofld_tx_sdesc);

	toep = malloc(len, M_CXGBE, M_ZERO | flags);
	if (toep == NULL)
		return (NULL);

	refcount_init(&toep->refcount, 1);
	toep->td = sc->tom_softc;
	toep->vi = vi;
	toep->tid = -1;
	toep->tx_total = tx_credits;
	toep->tx_credits = tx_credits;
	mbufq_init(&toep->ulp_pduq, INT_MAX);
	mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX);
	toep->txsd_total = txsd_total;
	toep->txsd_avail = txsd_total;
	toep->txsd_pidx = 0;
	toep->txsd_cidx = 0;
	aiotx_init_toep(toep);

	return (toep);
}

/*
 * Initialize a toepcb after its params have been filled out.
 */
int
init_toepcb(struct vi_info *vi, struct toepcb *toep)
{
	struct conn_params *cp = &toep->params;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct tx_cl_rl_params *tc;

	if (cp->tc_idx >= 0 && cp->tc_idx < sc->chip_params->nsched_cls) {
		tc = &pi->sched_params->cl_rl[cp->tc_idx];
		mtx_lock(&sc->tc_lock);
		if (tc->flags & CLRL_ERR) {
			log(LOG_ERR,
			    "%s: failed to associate traffic class %u with tid %u\n",
			    device_get_nameunit(vi->dev), cp->tc_idx,
			    toep->tid);
			cp->tc_idx = -1;
		} else {
			tc->refcount++;
		}
		mtx_unlock(&sc->tc_lock);
	}
	toep->ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
	toep->ofld_rxq = &sc->sge.ofld_rxq[cp->rxq_idx];
	toep->ctrlq = &sc->sge.ctrlq[pi->port_id];

	tls_init_toep(toep);
	if (ulp_mode(toep) == ULP_MODE_TCPDDP)
		ddp_init_toep(toep);

	toep->flags |= TPF_INITIALIZED;

	return (0);
}

struct toepcb *
hold_toepcb(struct toepcb *toep)
{

	refcount_acquire(&toep->refcount);
	return (toep);
}

void
free_toepcb(struct toepcb *toep)
{

	if (refcount_release(&toep->refcount) == 0)
		return;

	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: attached to an inpcb", __func__));
	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: CPL pending", __func__));

	if (toep->flags & TPF_INITIALIZED) {
		if (ulp_mode(toep) == ULP_MODE_TCPDDP)
			ddp_uninit_toep(toep);
		tls_uninit_toep(toep);
	}
	free(toep, M_CXGBE);
}
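/*
 * Reference counting sketch (assumed usage, mirroring the calls in this
 * file): alloc_toepcb() returns a toepcb with its refcount at 1, each extra
 * reference takes hold_toepcb(), and every reference is dropped with
 * free_toepcb().  Only the final release frees the memory:
 *
 *	toep = alloc_toepcb(vi, M_NOWAIT);	// refcount 1
 *	hold_toepcb(toep);			// refcount 2
 *	free_toepcb(toep);			// refcount 1
 *	free_toepcb(toep);			// freed
 */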
/*
 * Set up the socket for TCP offload.
 */
void
offload_socket(struct socket *so, struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	/* Update socket */
	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	if (inp->inp_vflag & INP_IPV6)
		so->so_proto = &toe6_protosw;
	else
		so->so_proto = &toe_protosw;
	SOCKBUF_UNLOCK(sb);

	/* Update TCP PCB */
	tp->tod = &td->tod;
	tp->t_toe = toep;
	tp->t_flags |= TF_TOE;

	/* Install an extra hold on inp */
	toep->inp = inp;
	toep->flags |= TPF_ATTACHED;
	in_pcbref(inp);

	/* Add the TOE PCB to the active list */
	mtx_lock(&td->toep_list_lock);
	TAILQ_INSERT_HEAD(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

void
restore_so_proto(struct socket *so, bool v6)
{
	if (v6)
		so->so_proto = tcp6_protosw;
	else
		so->so_proto = tcp_protosw;
}

/* This is _not_ the normal way to "unoffload" a socket. */
void
undo_offload_socket(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;
	struct tom_data *td = toep->td;
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	restore_so_proto(so, inp->inp_vflag & INP_IPV6);
	SOCKBUF_UNLOCK(sb);

	tp->tod = NULL;
	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;

	toep->inp = NULL;
	toep->flags &= ~TPF_ATTACHED;
	if (in_pcbrele_wlocked(inp))
		panic("%s: inp freed.", __func__);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

static void
release_offload_resources(struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct adapter *sc = td_adapter(td);
	int tid = toep->tid;

	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: %p has CPL pending.", __func__, toep));
	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: %p is still attached.", __func__, toep));

	CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)",
	    __func__, toep, tid, toep->l2te, toep->ce);

	/*
	 * These queues should have been emptied at approximately the same
	 * time that a normal connection's socket's so_snd would have been
	 * purged or drained.  Do _not_ clean up here.
	 */
	MPASS(mbufq_len(&toep->ulp_pduq) == 0);
	MPASS(mbufq_len(&toep->ulp_pdu_reclaimq) == 0);
#ifdef INVARIANTS
	if (ulp_mode(toep) == ULP_MODE_TCPDDP)
		ddp_assert_empty(toep);
#endif
	MPASS(TAILQ_EMPTY(&toep->aiotx_jobq));

	if (toep->l2te)
		t4_l2t_release(toep->l2te);

	if (tid >= 0) {
		remove_tid(sc, tid, toep->ce ? 2 : 1);
		release_tid(sc, tid, toep->ctrlq);
	}

	if (toep->ce)
		t4_release_clip_entry(sc, toep->ce);

	if (toep->params.tc_idx != -1)
		t4_release_cl_rl(sc, toep->vi->pi->port_id,
		    toep->params.tc_idx);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);

	free_toepcb(toep);
}
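/*
 * Teardown ordering, as implied by the flag checks here and in
 * final_cpl_received(): a toepcb is released only after the kernel has
 * detached the tcpcb (TPF_ATTACHED cleared in t4_pcb_detach()) and the
 * hardware has delivered its final CPL (TPF_CPL_PENDING cleared).
 * Whichever of the two happens last calls release_offload_resources().
 */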
/*
 * The kernel is done with the TCP PCB and this is our opportunity to unhook
 * the toepcb hanging off of it.  If the TOE driver is also done with the
 * toepcb (no pending CPL) then it is time to release all resources tied to
 * the toepcb.
 *
 * Also gets called when an offloaded active open fails and the TOM wants
 * the kernel to take the TCP PCB back.
 */
static void
t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
{
#if defined(KTR) || defined(INVARIANTS)
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);

	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
	KASSERT(toep->flags & TPF_ATTACHED,
	    ("%s: not attached", __func__));

#ifdef KTR
	if (tp->t_state == TCPS_SYN_SENT) {
		CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)",
		    __func__, toep->tid, toep, toep->flags, inp,
		    inp->inp_flags);
	} else {
		CTR6(KTR_CXGBE,
		    "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)",
		    toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp,
		    inp->inp_flags);
	}
#endif

	if (ulp_mode(toep) == ULP_MODE_TLS)
		tls_detach(toep);

	tp->tod = NULL;
	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;
	toep->flags &= ~TPF_ATTACHED;

	if (!(toep->flags & TPF_CPL_PENDING))
		release_offload_resources(toep);
}

/*
 * setsockopt handler.
 */
static void
t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = tp->t_toe;

	if (dir == SOPT_GET)
		return;

	CTR4(KTR_CXGBE, "%s: tp %p, dir %u, name %u", __func__, tp, dir, name);

	switch (name) {
	case TCP_NODELAY:
		if (tp->t_state != TCPS_ESTABLISHED)
			break;
		toep->params.nagle = tp->t_flags & TF_NODELAY ? 0 : 1;
		t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
		    V_TF_NAGLE(1), V_TF_NAGLE(toep->params.nagle), 0, 0);
		break;
	default:
		break;
	}
}

static inline uint64_t
get_tcb_tflags(const uint64_t *tcb)
{

	return ((be64toh(tcb[14]) << 32) | (be64toh(tcb[15]) >> 32));
}

static inline uint32_t
get_tcb_field(const uint64_t *tcb, u_int word, uint32_t mask, u_int shift)
{
#define LAST_WORD ((TCB_SIZE / 4) - 1)
	uint64_t t1, t2;
	int flit_idx;

	MPASS(mask != 0);
	MPASS(word <= LAST_WORD);
	MPASS(shift < 32);

	flit_idx = (LAST_WORD - word) / 2;
	if (word & 0x1)
		shift += 32;
	t1 = be64toh(tcb[flit_idx]) >> shift;
	t2 = 0;
	if (fls(mask) > 64 - shift) {
		/*
		 * Will spill over into the next logical flit, which is the
		 * flit before this one.  The flit_idx before this one must be
		 * valid.
		 */
		MPASS(flit_idx > 0);
		t2 = be64toh(tcb[flit_idx - 1]) << (64 - shift);
	}
	return ((t2 | t1) & mask);
#undef LAST_WORD
}
#define GET_TCB_FIELD(tcb, F) \
    get_tcb_field(tcb, W_TCB_##F, M_TCB_##F, S_TCB_##F)
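/*
 * Worked example of the flit arithmetic in get_tcb_field() (illustrative):
 * the TCB is TCB_SIZE / 4 32-bit words packed into big-endian 64-bit flits,
 * with word 0 living in the last flit.  Reading word 5 uses
 * flit_idx = (LAST_WORD - 5) / 2, and because word 5 is odd-numbered it sits
 * in the upper half of that flit, hence the extra 32-bit shift.  A field
 * whose mask reaches past bit 64 - shift spills into the previous flit (the
 * next logical one), which is the t2 term.
 */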
/*
 * Issues a CPL_GET_TCB to read the entire TCB for the tid.
 */
static int
send_get_tcb(struct adapter *sc, u_int tid)
{
	struct cpl_get_tcb *cpl;
	struct wrq_cookie cookie;

	MPASS(tid < sc->tids.ntids);

	cpl = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*cpl), 16),
	    &cookie);
	if (__predict_false(cpl == NULL))
		return (ENOMEM);
	bzero(cpl, sizeof(*cpl));
	INIT_TP_WR(cpl, tid);
	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_GET_TCB, tid));
	cpl->reply_ctrl = htobe16(V_REPLY_CHAN(0) |
	    V_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id));
	cpl->cookie = 0xff;
	commit_wrq_wr(&sc->sge.ctrlq[0], cpl, &cookie);

	return (0);
}

static struct tcb_histent *
alloc_tcb_histent(struct adapter *sc, u_int tid, int flags)
{
	struct tcb_histent *te;

	MPASS(flags == M_NOWAIT || flags == M_WAITOK);

	te = malloc(sizeof(*te), M_CXGBE, M_ZERO | flags);
	if (te == NULL)
		return (NULL);
	mtx_init(&te->te_lock, "TCB entry", NULL, MTX_DEF);
	callout_init_mtx(&te->te_callout, &te->te_lock, 0);
	te->te_adapter = sc;
	te->te_tid = tid;

	return (te);
}

static void
free_tcb_histent(struct tcb_histent *te)
{

	mtx_destroy(&te->te_lock);
	free(te, M_CXGBE);
}

/*
 * Start tracking the tid in the TCB history.
 */
int
add_tid_to_history(struct adapter *sc, u_int tid)
{
	struct tcb_histent *te = NULL;
	struct tom_data *td = sc->tom_softc;
	int rc;

	MPASS(tid < sc->tids.ntids);

	if (td->tcb_history == NULL)
		return (ENXIO);

	rw_wlock(&td->tcb_history_lock);
	if (td->tcb_history[tid] != NULL) {
		rc = EEXIST;
		goto done;
	}
	te = alloc_tcb_histent(sc, tid, M_NOWAIT);
	if (te == NULL) {
		rc = ENOMEM;
		goto done;
	}
	mtx_lock(&te->te_lock);
	rc = send_get_tcb(sc, tid);
	if (rc == 0) {
		te->te_flags |= TE_RPL_PENDING;
		td->tcb_history[tid] = te;
		mtx_unlock(&te->te_lock);
	} else {
		/* Drop the lock before freeing; the entry was never published. */
		mtx_unlock(&te->te_lock);
		free_tcb_histent(te);
	}
done:
	rw_wunlock(&td->tcb_history_lock);
	return (rc);
}

static void
remove_tcb_histent(struct tcb_histent *te)
{
	struct adapter *sc = te->te_adapter;
	struct tom_data *td = sc->tom_softc;

	rw_assert(&td->tcb_history_lock, RA_WLOCKED);
	mtx_assert(&te->te_lock, MA_OWNED);
	MPASS(td->tcb_history[te->te_tid] == te);

	td->tcb_history[te->te_tid] = NULL;
	free_tcb_histent(te);
	rw_wunlock(&td->tcb_history_lock);
}

static inline struct tcb_histent *
lookup_tcb_histent(struct adapter *sc, u_int tid, bool addrem)
{
	struct tcb_histent *te;
	struct tom_data *td = sc->tom_softc;

	MPASS(tid < sc->tids.ntids);

	if (td->tcb_history == NULL)
		return (NULL);

	if (addrem)
		rw_wlock(&td->tcb_history_lock);
	else
		rw_rlock(&td->tcb_history_lock);
	te = td->tcb_history[tid];
	if (te != NULL) {
		mtx_lock(&te->te_lock);
		return (te);	/* with both locks held */
	}
	if (addrem)
		rw_wunlock(&td->tcb_history_lock);
	else
		rw_runlock(&td->tcb_history_lock);

	return (te);
}

static inline void
release_tcb_histent(struct tcb_histent *te)
{
	struct adapter *sc = te->te_adapter;
	struct tom_data *td = sc->tom_softc;

	mtx_assert(&te->te_lock, MA_OWNED);
	mtx_unlock(&te->te_lock);
	rw_assert(&td->tcb_history_lock, RA_RLOCKED);
	rw_runlock(&td->tcb_history_lock);
}
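/*
 * Locking sketch for the TCB history (a summary of the helpers above): a
 * successful lookup_tcb_histent() returns with both the tcb_history_lock
 * and the entry's mutex held.  Read-mode lookups are undone with
 * release_tcb_histent(); write-mode (addrem) lookups are undone by
 * remove_tcb_histent(), which also unpublishes and frees the entry.
 */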
static void
request_tcb(void *arg)
{
	struct tcb_histent *te = arg;

	mtx_assert(&te->te_lock, MA_OWNED);

	/* No one else is supposed to update the histent. */
	MPASS(!(te->te_flags & TE_RPL_PENDING));
	if (send_get_tcb(te->te_adapter, te->te_tid) == 0)
		te->te_flags |= TE_RPL_PENDING;
	else
		callout_schedule(&te->te_callout, hz / 100);
}

static void
update_tcb_histent(struct tcb_histent *te, const uint64_t *tcb)
{
	struct tom_data *td = te->te_adapter->tom_softc;
	uint64_t tflags = get_tcb_tflags(tcb);
	uint8_t sample = 0;

	if (GET_TCB_FIELD(tcb, SND_MAX_RAW) != GET_TCB_FIELD(tcb, SND_UNA_RAW)) {
		if (GET_TCB_FIELD(tcb, T_RXTSHIFT) != 0)
			sample |= TS_RTO;
		if (GET_TCB_FIELD(tcb, T_DUPACKS) != 0)
			sample |= TS_DUPACKS;
		if (GET_TCB_FIELD(tcb, T_DUPACKS) >= td->dupack_threshold)
			sample |= TS_FASTREXMT;
	}

	if (GET_TCB_FIELD(tcb, SND_MAX_RAW) != 0) {
		uint32_t snd_wnd;

		sample |= TS_SND_BACKLOGGED;	/* for whatever reason. */

		snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV);
		if (tflags & V_TF_RECV_SCALE(1))
			snd_wnd <<= GET_TCB_FIELD(tcb, RCV_SCALE);
		if (GET_TCB_FIELD(tcb, SND_CWND) < snd_wnd)
			sample |= TS_CWND_LIMITED;	/* maybe due to CWND */
	}

	if (tflags & V_TF_CCTRL_ECN(1)) {

		/*
		 * CE marker on incoming IP hdr, echoing ECE back in the TCP
		 * hdr.  Indicates congestion somewhere on the way from the
		 * peer to this node.
		 */
		if (tflags & V_TF_CCTRL_ECE(1))
			sample |= TS_ECN_ECE;

		/*
		 * ECE seen and CWR sent (or about to be sent).  Might indicate
		 * congestion on the way to the peer.  This node is reducing
		 * its congestion window in response.
		 */
		if (tflags & (V_TF_CCTRL_CWR(1) | V_TF_CCTRL_RFR(1)))
			sample |= TS_ECN_CWR;
	}

	te->te_sample[te->te_pidx] = sample;
	if (++te->te_pidx == nitems(te->te_sample))
		te->te_pidx = 0;
	memcpy(te->te_tcb, tcb, TCB_SIZE);
	te->te_flags |= TE_ACTIVE;
}
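/*
 * The history is driven by a simple polling loop: request_tcb() above sends
 * a CPL_GET_TCB (retrying after hz / 100 ticks if the request could not be
 * sent), and do_get_tcb_rpl() below either drops a closed tid from the
 * history or records a sample and schedules the next poll hz / 10 ticks
 * later.
 */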
static int
do_get_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_get_tcb_rpl *cpl = mtod(m, const void *);
	const uint64_t *tcb = (const uint64_t *)(const void *)(cpl + 1);
	struct tcb_histent *te;
	const u_int tid = GET_TID(cpl);
	bool remove;

	remove = GET_TCB_FIELD(tcb, T_STATE) == TCPS_CLOSED;
	te = lookup_tcb_histent(sc, tid, remove);
	if (te == NULL) {
		/* Not in the history.  Who issued the GET_TCB for this? */
		device_printf(sc->dev, "tcb %u: flags 0x%016jx, state %u, "
		    "srtt %u, sscale %u, rscale %u, cookie 0x%x\n", tid,
		    (uintmax_t)get_tcb_tflags(tcb), GET_TCB_FIELD(tcb, T_STATE),
		    GET_TCB_FIELD(tcb, T_SRTT), GET_TCB_FIELD(tcb, SND_SCALE),
		    GET_TCB_FIELD(tcb, RCV_SCALE), cpl->cookie);
		goto done;
	}

	MPASS(te->te_flags & TE_RPL_PENDING);
	te->te_flags &= ~TE_RPL_PENDING;
	if (remove) {
		remove_tcb_histent(te);
	} else {
		update_tcb_histent(te, tcb);
		callout_reset(&te->te_callout, hz / 10, request_tcb, te);
		release_tcb_histent(te);
	}
done:
	m_freem(m);
	return (0);
}

static void
fill_tcp_info_from_tcb(struct adapter *sc, uint64_t *tcb, struct tcp_info *ti)
{
	uint32_t v;

	ti->tcpi_state = GET_TCB_FIELD(tcb, T_STATE);

	v = GET_TCB_FIELD(tcb, T_SRTT);
	ti->tcpi_rtt = tcp_ticks_to_us(sc, v);

	v = GET_TCB_FIELD(tcb, T_RTTVAR);
	ti->tcpi_rttvar = tcp_ticks_to_us(sc, v);

	ti->tcpi_snd_ssthresh = GET_TCB_FIELD(tcb, SND_SSTHRESH);
	ti->tcpi_snd_cwnd = GET_TCB_FIELD(tcb, SND_CWND);
	ti->tcpi_rcv_nxt = GET_TCB_FIELD(tcb, RCV_NXT);

	v = GET_TCB_FIELD(tcb, TX_MAX);
	ti->tcpi_snd_nxt = v - GET_TCB_FIELD(tcb, SND_NXT_RAW);

	/* Receive window being advertised by us. */
	ti->tcpi_rcv_wscale = GET_TCB_FIELD(tcb, SND_SCALE);	/* Yes, SND. */
	ti->tcpi_rcv_space = GET_TCB_FIELD(tcb, RCV_WND);

	/* Send window */
	ti->tcpi_snd_wscale = GET_TCB_FIELD(tcb, RCV_SCALE);	/* Yes, RCV. */
	ti->tcpi_snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV);
	if (get_tcb_tflags(tcb) & V_TF_RECV_SCALE(1))
		ti->tcpi_snd_wnd <<= ti->tcpi_snd_wscale;
	else
		ti->tcpi_snd_wscale = 0;
}

static void
fill_tcp_info_from_history(struct adapter *sc, struct tcb_histent *te,
    struct tcp_info *ti)
{

	fill_tcp_info_from_tcb(sc, te->te_tcb, ti);
}

/*
 * Reads the TCB for the given tid using a memory window and copies it to
 * 'buf' in the same format as CPL_GET_TCB_RPL.
 */
static void
read_tcb_using_memwin(struct adapter *sc, u_int tid, uint64_t *buf)
{
	int i, j, k, rc;
	uint32_t addr;
	u_char *tcb, tmp;

	MPASS(tid < sc->tids.ntids);

	addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;
	rc = read_via_memwin(sc, 2, addr, (uint32_t *)buf, TCB_SIZE);
	if (rc != 0)
		return;

	/* Reverse the order of the 16-byte chunks to match CPL_GET_TCB_RPL. */
	tcb = (u_char *)buf;
	for (i = 0, j = TCB_SIZE - 16; i < j; i += 16, j -= 16) {
		for (k = 0; k < 16; k++) {
			tmp = tcb[i + k];
			tcb[i + k] = tcb[j + k];
			tcb[j + k] = tmp;
		}
	}
}

static void
fill_tcp_info(struct adapter *sc, u_int tid, struct tcp_info *ti)
{
	uint64_t tcb[TCB_SIZE / sizeof(uint64_t)];
	struct tcb_histent *te;

	ti->tcpi_toe_tid = tid;
	te = lookup_tcb_histent(sc, tid, false);
	if (te != NULL) {
		fill_tcp_info_from_history(sc, te, ti);
		release_tcb_histent(te);
	} else {
		if (!(sc->debug_flags & DF_DISABLE_TCB_CACHE)) {
			/* XXX: tell firmware to flush TCB cache. */
		}
		read_tcb_using_memwin(sc, tid, tcb);
		fill_tcp_info_from_tcb(sc, tcb, ti);
	}
}
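/*
 * fill_tcp_info() prefers the history entry when one exists because it is
 * kept fresh by CPL_GET_TCB replies; the fallback reads the TCB straight
 * out of adapter memory, which can lag while the chip has the TCB cached
 * (hence the XXX above about flushing the TCB cache).
 */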
/*
 * Called by the kernel to allow the TOE driver to "refine" values filled in
 * the tcp_info for an offloaded connection.
 */
static void
t4_tcp_info(struct toedev *tod, struct tcpcb *tp, struct tcp_info *ti)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	MPASS(ti != NULL);

	fill_tcp_info(sc, toep->tid, ti);
}

#ifdef KERN_TLS
static int
t4_alloc_tls_session(struct toedev *tod, struct tcpcb *tp,
    struct ktls_session *tls, int direction)
{
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	MPASS(tls != NULL);

	return (tls_alloc_ktls(toep, tls, direction));
}
#endif

/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

static void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
    uint64_t val, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htobe16(V_NO_REPLY(1));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__SET_TCB_FIELD_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}
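/*
 * Layout note for the helper above: each unit is a ULP_TX_PKT carrying a
 * ULPTX_IDATA sub-command that wraps one CPL_SET_TCB_FIELD, padded with a
 * ULP_TX_SC_NOOP whenever LEN__SET_TCB_FIELD_ULP is not a multiple of 16 so
 * that consecutive units in a compound work request stay 16-byte aligned.
 * t4_pmtu_update() below chains two of them in a single atomic WR.
 */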
static void
send_mss_flowc_wr(struct adapter *sc, struct toepcb *toep)
{
	struct wrq_cookie cookie;
	struct fw_flowc_wr *flowc;
	struct ofld_tx_sdesc *txsd;
	const int flowclen = sizeof(*flowc) + sizeof(struct fw_flowc_mnemval);
	const int flowclen16 = howmany(flowclen, 16);

	if (toep->tx_credits < flowclen16 || toep->txsd_avail == 0) {
		CH_ERR(sc, "%s: tid %u out of tx credits (%d, %d).\n", __func__,
		    toep->tid, toep->tx_credits, toep->txsd_avail);
		return;
	}

	flowc = start_wrq_wr(&toep->ofld_txq->wrq, flowclen16, &cookie);
	if (__predict_false(flowc == NULL)) {
		CH_ERR(sc, "ENOMEM in %s for tid %u.\n", __func__, toep->tid);
		return;
	}
	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(1));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(flowclen16) |
	    V_FW_WR_FLOWID(toep->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[0].val = htobe32(toep->params.emss);

	txsd = &toep->txsd[toep->txsd_pidx];
	txsd->tx_credits = flowclen16;
	txsd->plen = 0;
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;
	commit_wrq_wr(&toep->ofld_txq->wrq, flowc, &cookie);
}

static void
t4_pmtu_update(struct toedev *tod, struct tcpcb *tp, tcp_seq seq, int mtu)
{
	struct work_request_hdr *wrh;
	struct ulp_txpkt *ulpmc;
	int idx, len;
	struct wrq_cookie cookie;
	struct inpcb *inp = tp->t_inpcb;
	struct toepcb *toep = tp->t_toe;
	struct adapter *sc = td_adapter(toep->td);
	unsigned short *mtus = &sc->params.mtus[0];

	INP_WLOCK_ASSERT(inp);
	MPASS(mtu > 0);	/* kernel is supposed to provide something usable. */

	/* tp->snd_una and snd_max are in host byte order too. */
	seq = be32toh(seq);

	CTR6(KTR_CXGBE, "%s: tid %d, seq 0x%08x, mtu %u, mtu_idx %u (%d)",
	    __func__, toep->tid, seq, mtu, toep->params.mtu_idx,
	    mtus[toep->params.mtu_idx]);

	if (ulp_mode(toep) == ULP_MODE_NONE &&	/* XXX: Read TCB otherwise? */
	    (SEQ_LT(seq, tp->snd_una) || SEQ_GEQ(seq, tp->snd_max))) {
		CTR5(KTR_CXGBE,
		    "%s: tid %d, seq 0x%08x not in range [0x%08x, 0x%08x).",
		    __func__, toep->tid, seq, tp->snd_una, tp->snd_max);
		return;
	}

	/* Find the best mtu_idx for the suggested MTU. */
	for (idx = 0; idx < NMTUS - 1 && mtus[idx + 1] <= mtu; idx++)
		continue;
	if (idx >= toep->params.mtu_idx)
		return;	/* Never increase the PMTU (just like the kernel). */

	/*
	 * We'll send a compound work request with 2 SET_TCB_FIELDs -- the
	 * first one updates the mtu_idx and the second one triggers a
	 * retransmit.
	 */
	len = sizeof(*wrh) + 2 * roundup2(LEN__SET_TCB_FIELD_ULP, 16);
	wrh = start_wrq_wr(toep->ctrlq, howmany(len, 16), &cookie);
	if (wrh == NULL) {
		CH_ERR(sc, "failed to change mtu_idx of tid %d (%u -> %u).\n",
		    toep->tid, toep->params.mtu_idx, idx);
		return;
	}
	INIT_ULPTX_WRH(wrh, len, 1, 0);	/* atomic */
	ulpmc = (struct ulp_txpkt *)(wrh + 1);
	ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_T_MAXSEG,
	    V_TCB_T_MAXSEG(M_TCB_T_MAXSEG), V_TCB_T_MAXSEG(idx), toep->tid);
	ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_TIMESTAMP,
	    V_TCB_TIMESTAMP(0x7FFFFULL << 11), 0, toep->tid);
	commit_wrq_wr(toep->ctrlq, wrh, &cookie);

	/* Update the software toepcb and tcpcb. */
	toep->params.mtu_idx = idx;
	tp->t_maxseg = mtus[toep->params.mtu_idx];
	if (inp->inp_inc.inc_flags & INC_ISIPV6)
		tp->t_maxseg -= sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		tp->t_maxseg -= sizeof(struct ip) + sizeof(struct tcphdr);
	toep->params.emss = tp->t_maxseg;
	if (tp->t_flags & TF_RCVD_TSTMP)
		toep->params.emss -= TCPOLEN_TSTAMP_APPA;

	/* Update the firmware flowc. */
	send_mss_flowc_wr(sc, toep);

	/* Update the MTU in the kernel's hostcache. */
	if (sc->tt.update_hc_on_pmtu_change != 0) {
		struct in_conninfo inc = {0};

		inc.inc_fibnum = inp->inp_inc.inc_fibnum;
		if (inp->inp_inc.inc_flags & INC_ISIPV6) {
			inc.inc_flags |= INC_ISIPV6;
			inc.inc6_faddr = inp->inp_inc.inc6_faddr;
		} else {
			inc.inc_faddr = inp->inp_inc.inc_faddr;
		}
		tcp_hc_updatemtu(&inc, mtu);
	}

	CTR6(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u), t_maxseg %u, emss %u",
	    __func__, toep->tid, toep->params.mtu_idx,
	    mtus[toep->params.mtu_idx], tp->t_maxseg, toep->params.emss);
}
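/*
 * Example of the arithmetic above (illustrative values): if the new path
 * MTU selects mtus[idx] = 1400, then for IPv4 t_maxseg becomes
 * 1400 - sizeof(struct ip) - sizeof(struct tcphdr) = 1360, and emss drops
 * by a further TCPOLEN_TSTAMP_APPA (12) to 1348 when timestamps are in use.
 * The firmware learns the new emss via the FLOWC work request.
 */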
/*
 * The TOE driver will not receive any more CPLs for the tid associated with
 * the toepcb; release the hold on the inpcb.
 */
void
final_cpl_received(struct toepcb *toep)
{
	struct inpcb *inp = toep->inp;

	KASSERT(inp != NULL, ("%s: inp is NULL", __func__));
	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_CPL_PENDING,
	    ("%s: CPL not pending already?", __func__));

	CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)",
	    __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags);

	if (ulp_mode(toep) == ULP_MODE_TCPDDP)
		release_ddp_resources(toep);
	else if (ulp_mode(toep) == ULP_MODE_TLS)
		tls_detach(toep);
	toep->inp = NULL;
	toep->flags &= ~TPF_CPL_PENDING;
	mbufq_drain(&toep->ulp_pduq);
	mbufq_drain(&toep->ulp_pdu_reclaimq);

	if (!(toep->flags & TPF_ATTACHED))
		release_offload_resources(toep);

	if (!in_pcbrele_wlocked(inp))
		INP_WUNLOCK(inp);
}

void
insert_tid(struct adapter *sc, int tid, void *ctx, int ntids)
{
	struct tid_info *t = &sc->tids;

	MPASS(tid >= t->tid_base);
	MPASS(tid - t->tid_base < t->ntids);

	t->tid_tab[tid - t->tid_base] = ctx;
	atomic_add_int(&t->tids_in_use, ntids);
}

void *
lookup_tid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;

	return (t->tid_tab[tid - t->tid_base]);
}

void
update_tid(struct adapter *sc, int tid, void *ctx)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid - t->tid_base] = ctx;
}

void
remove_tid(struct adapter *sc, int tid, int ntids)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid - t->tid_base] = NULL;
	atomic_subtract_int(&t->tids_in_use, ntids);
}

/*
 * What mtu_idx to use, given a 4-tuple.  Note that both s->mss and
 * tcp_mssopt have the MSS that we should advertise in our SYN.  Advertised
 * MSS doesn't account for any TCP options so the effective MSS (only
 * payload, no headers or options) could be different.
 */
static int
find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc,
    struct offload_settings *s)
{
	unsigned short *mtus = &sc->params.mtus[0];
	int i, mss, mtu;

	MPASS(inc != NULL);

	mss = s->mss > 0 ? s->mss : tcp_mssopt(inc);
	if (inc->inc_flags & INC_ISIPV6)
		mtu = mss + sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		mtu = mss + sizeof(struct ip) + sizeof(struct tcphdr);

	for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mtu; i++)
		continue;

	return (i);
}
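/*
 * Illustrative walk of find_best_mtu_idx(): with an advertised MSS of 1460
 * over IPv4 the target MTU is 1460 + 20 + 20 = 1500, and the loop stops at
 * the largest mtus[i] that does not exceed 1500.
 */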
/*
 * Determine the receive window size for a socket.
 */
u_long
select_rcv_wnd(struct socket *so)
{
	unsigned long wnd;

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	wnd = sbspace(&so->so_rcv);
	if (wnd < MIN_RCV_WND)
		wnd = MIN_RCV_WND;

	return min(wnd, MAX_RCV_WND);
}

int
select_rcv_wscale(void)
{
	int wscale = 0;
	unsigned long space = sb_max;

	if (space > MAX_RCV_WND)
		space = MAX_RCV_WND;

	while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space)
		wscale++;

	return (wscale);
}
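/*
 * Worked example for select_rcv_wscale() (illustrative): with the space
 * clamped to min(sb_max, MAX_RCV_WND), a 256KB limit ends with wscale = 3,
 * because 65535 << 2 falls just short of 256KB while 65535 << 3 covers it.
 */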
__be64
calc_options0(struct vi_info *vi, struct conn_params *cp)
{
	uint64_t opt0 = 0;

	opt0 |= F_TCAM_BYPASS;

	MPASS(cp->wscale >= 0 && cp->wscale <= M_WND_SCALE);
	opt0 |= V_WND_SCALE(cp->wscale);

	MPASS(cp->mtu_idx >= 0 && cp->mtu_idx < NMTUS);
	opt0 |= V_MSS_IDX(cp->mtu_idx);

	MPASS(cp->ulp_mode >= 0 && cp->ulp_mode <= M_ULP_MODE);
	opt0 |= V_ULP_MODE(cp->ulp_mode);

	MPASS(cp->opt0_bufsize >= 0 && cp->opt0_bufsize <= M_RCV_BUFSIZ);
	opt0 |= V_RCV_BUFSIZ(cp->opt0_bufsize);

	MPASS(cp->l2t_idx >= 0 && cp->l2t_idx < vi->adapter->vres.l2t.size);
	opt0 |= V_L2T_IDX(cp->l2t_idx);

	opt0 |= V_SMAC_SEL(vi->smt_idx);
	opt0 |= V_TX_CHAN(vi->pi->tx_chan);

	MPASS(cp->keepalive == 0 || cp->keepalive == 1);
	opt0 |= V_KEEP_ALIVE(cp->keepalive);

	MPASS(cp->nagle == 0 || cp->nagle == 1);
	opt0 |= V_NAGLE(cp->nagle);

	return (htobe64(opt0));
}

__be32
calc_options2(struct vi_info *vi, struct conn_params *cp)
{
	uint32_t opt2 = 0;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;

	/*
	 * rx flow control, rx coalesce, congestion control, and tx pace are
	 * all explicitly set by the driver.  On T5+ the ISS is also set by
	 * the driver to the value picked by the kernel.
	 */
	if (is_t4(sc)) {
		opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID;
		opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID;
	} else {
		opt2 |= F_T5_OPT_2_VALID;	/* all 4 valid */
		opt2 |= F_T5_ISS;		/* ISS provided in CPL */
	}

	MPASS(cp->sack == 0 || cp->sack == 1);
	opt2 |= V_SACK_EN(cp->sack);

	MPASS(cp->tstamp == 0 || cp->tstamp == 1);
	opt2 |= V_TSTAMPS_EN(cp->tstamp);

	if (cp->wscale > 0)
		opt2 |= F_WND_SCALE_EN;

	MPASS(cp->ecn == 0 || cp->ecn == 1);
	opt2 |= V_CCTRL_ECN(cp->ecn);

	/* XXX: F_RX_CHANNEL for multiple rx c-chan support goes here. */

	opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]);
	opt2 |= V_PACE(0);
	opt2 |= F_RSS_QUEUE_VALID;
	opt2 |= V_RSS_QUEUE(sc->sge.ofld_rxq[cp->rxq_idx].iq.abs_id);

	MPASS(cp->cong_algo >= 0 && cp->cong_algo <= M_CONG_CNTRL);
	opt2 |= V_CONG_CNTRL(cp->cong_algo);

	MPASS(cp->rx_coalesce == 0 || cp->rx_coalesce == 1);
	if (cp->rx_coalesce == 1)
		opt2 |= V_RX_COALESCE(M_RX_COALESCE);

	opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0);
#ifdef USE_DDP_RX_FLOW_CONTROL
	if (cp->ulp_mode == ULP_MODE_TCPDDP)
		opt2 |= F_RX_FC_DDP;
#endif

	return (htobe32(opt2));
}

uint64_t
select_ntuple(struct vi_info *vi, struct l2t_entry *e)
{
	struct adapter *sc = vi->adapter;
	struct tp_params *tp = &sc->params.tp;
	uint64_t ntuple = 0;

	/*
	 * Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && EVL_VLANOFTAG(e->vlan) != CPL_L2T_VLAN_NONE)
		ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;

	if (tp->port_shift >= 0)
		ntuple |= (uint64_t)e->lport << tp->port_shift;

	if (tp->protocol_shift >= 0)
		ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;

	if (tp->vnic_shift >= 0 && tp->vnic_mode == FW_VNIC_MODE_PF_VF) {
		ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vi->vin) |
		    V_FT_VNID_ID_PF(sc->pf) | V_FT_VNID_ID_VLD(vi->vfvld)) <<
		    tp->vnic_shift;
	}

	if (is_t4(sc))
		return (htobe32((uint32_t)ntuple));
	else
		return (htobe64(V_FILTER_TUPLE(ntuple)));
}
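/*
 * The compressed filter tuple above only includes the fields the chip was
 * configured to match on (each tp->*_shift is negative when its field is
 * absent from the tuple).  T4 returns a 32-bit tuple; T5 and later wrap the
 * wider tuple in V_FILTER_TUPLE.
 */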
static int
is_tls_sock(struct socket *so, struct adapter *sc)
{
	struct inpcb *inp = sotoinpcb(so);
	int i, rc;

	/* XXX: Eventually add a SO_WANT_TLS socket option perhaps? */
	rc = 0;
	ADAPTER_LOCK(sc);
	for (i = 0; i < sc->tt.num_tls_rx_ports; i++) {
		if (inp->inp_lport == htons(sc->tt.tls_rx_ports[i]) ||
		    inp->inp_fport == htons(sc->tt.tls_rx_ports[i])) {
			rc = 1;
			break;
		}
	}
	ADAPTER_UNLOCK(sc);
	return (rc);
}

/*
 * Initialize various connection parameters.
 */
void
init_conn_params(struct vi_info *vi, struct offload_settings *s,
    struct in_conninfo *inc, struct socket *so,
    const struct tcp_options *tcpopt, int16_t l2t_idx, struct conn_params *cp)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct tom_tunables *tt = &sc->tt;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	u_long wnd;

	MPASS(s->offload != 0);

	/* Congestion control algorithm */
	if (s->cong_algo >= 0)
		cp->cong_algo = s->cong_algo & M_CONG_CNTRL;
	else if (sc->tt.cong_algorithm >= 0)
		cp->cong_algo = tt->cong_algorithm & M_CONG_CNTRL;
	else {
		struct cc_algo *cc = CC_ALGO(tp);

		if (strcasecmp(cc->name, "reno") == 0)
			cp->cong_algo = CONG_ALG_RENO;
		else if (strcasecmp(cc->name, "tahoe") == 0)
			cp->cong_algo = CONG_ALG_TAHOE;
		else if (strcasecmp(cc->name, "newreno") == 0)
			cp->cong_algo = CONG_ALG_NEWRENO;
		else if (strcasecmp(cc->name, "highspeed") == 0)
			cp->cong_algo = CONG_ALG_HIGHSPEED;
		else {
			/*
			 * Use newreno in case the algorithm selected by the
			 * host stack is not supported by the hardware.
			 */
			cp->cong_algo = CONG_ALG_NEWRENO;
		}
	}

	/* Tx traffic scheduling class. */
	if (s->sched_class >= 0 &&
	    s->sched_class < sc->chip_params->nsched_cls) {
		cp->tc_idx = s->sched_class;
	} else
		cp->tc_idx = -1;

	/* Nagle's algorithm. */
	if (s->nagle >= 0)
		cp->nagle = s->nagle > 0 ? 1 : 0;
	else
		cp->nagle = tp->t_flags & TF_NODELAY ? 0 : 1;

	/* TCP Keepalive. */
	if (V_tcp_always_keepalive || so_options_get(so) & SO_KEEPALIVE)
		cp->keepalive = 1;
	else
		cp->keepalive = 0;

	/* Optimization that's specific to T5 @ 40G. */
	if (tt->tx_align >= 0)
		cp->tx_align = tt->tx_align > 0 ? 1 : 0;
	else if (chip_id(sc) == CHELSIO_T5 &&
	    (port_top_speed(pi) > 10 || sc->params.nports > 2))
		cp->tx_align = 1;
	else
		cp->tx_align = 0;

	/* ULP mode. */
	if (can_tls_offload(sc) &&
	    (s->tls > 0 || (s->tls < 0 && is_tls_sock(so, sc))))
		cp->ulp_mode = ULP_MODE_TLS;
	else if (s->ddp > 0 ||
	    (s->ddp < 0 && sc->tt.ddp && (so_options_get(so) & SO_NO_DDP) == 0))
		cp->ulp_mode = ULP_MODE_TCPDDP;
	else
		cp->ulp_mode = ULP_MODE_NONE;

	/* Rx coalescing. */
	if (s->rx_coalesce >= 0)
		cp->rx_coalesce = s->rx_coalesce > 0 ? 1 : 0;
	else if (cp->ulp_mode == ULP_MODE_TLS)
		cp->rx_coalesce = 0;
	else if (tt->rx_coalesce >= 0)
		cp->rx_coalesce = tt->rx_coalesce > 0 ? 1 : 0;
	else
		cp->rx_coalesce = 1;	/* default */

	/*
	 * Index in the PMTU table.  This controls the MSS that we announce in
	 * our SYN initially, but after ESTABLISHED it controls the MSS that we
	 * use to send data.
	 */
	cp->mtu_idx = find_best_mtu_idx(sc, inc, s);

	/* Tx queue for this connection. */
	if (s->txq >= 0 && s->txq < vi->nofldtxq)
		cp->txq_idx = s->txq;
	else
		cp->txq_idx = arc4random() % vi->nofldtxq;
	cp->txq_idx += vi->first_ofld_txq;

	/* Rx queue for this connection. */
	if (s->rxq >= 0 && s->rxq < vi->nofldrxq)
		cp->rxq_idx = s->rxq;
	else
		cp->rxq_idx = arc4random() % vi->nofldrxq;
	cp->rxq_idx += vi->first_ofld_rxq;
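	/*
	 * Note on the queue selection above: when the offload policy does not
	 * pin a queue, connections are spread across the VI's offload queues
	 * with arc4random(), so the mapping is fixed for the life of a
	 * connection but randomized across connections.
	 */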
	if (SOLISTENING(so)) {
		/* Passive open */
		MPASS(tcpopt != NULL);

		/* TCP timestamp option */
		if (tcpopt->tstamp &&
		    (s->tstamp > 0 || (s->tstamp < 0 && V_tcp_do_rfc1323)))
			cp->tstamp = 1;
		else
			cp->tstamp = 0;

		/* SACK */
		if (tcpopt->sack &&
		    (s->sack > 0 || (s->sack < 0 && V_tcp_do_sack)))
			cp->sack = 1;
		else
			cp->sack = 0;

		/* Receive window scaling. */
		if (tcpopt->wsf > 0 && tcpopt->wsf < 15 && V_tcp_do_rfc1323)
			cp->wscale = select_rcv_wscale();
		else
			cp->wscale = 0;

		/* ECN */
		if (tcpopt->ecn &&	/* XXX: review. */
		    (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn)))
			cp->ecn = 1;
		else
			cp->ecn = 0;

		wnd = max(so->sol_sbrcv_hiwat, MIN_RCV_WND);
		cp->opt0_bufsize = min(wnd >> 10, M_RCV_BUFSIZ);

		if (tt->sndbuf > 0)
			cp->sndbuf = tt->sndbuf;
		else if (so->sol_sbsnd_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf)
			cp->sndbuf = 256 * 1024;
		else
			cp->sndbuf = so->sol_sbsnd_hiwat;
	} else {
		/* Active open */

		/* TCP timestamp option */
		if (s->tstamp > 0 ||
		    (s->tstamp < 0 && (tp->t_flags & TF_REQ_TSTMP)))
			cp->tstamp = 1;
		else
			cp->tstamp = 0;

		/* SACK */
		if (s->sack > 0 ||
		    (s->sack < 0 && (tp->t_flags & TF_SACK_PERMIT)))
			cp->sack = 1;
		else
			cp->sack = 0;

		/* Receive window scaling */
		if (tp->t_flags & TF_REQ_SCALE)
			cp->wscale = select_rcv_wscale();
		else
			cp->wscale = 0;

		/* ECN */
		if (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn == 1))
			cp->ecn = 1;
		else
			cp->ecn = 0;

		SOCKBUF_LOCK(&so->so_rcv);
		wnd = max(select_rcv_wnd(so), MIN_RCV_WND);
		SOCKBUF_UNLOCK(&so->so_rcv);
		cp->opt0_bufsize = min(wnd >> 10, M_RCV_BUFSIZ);

		if (tt->sndbuf > 0)
			cp->sndbuf = tt->sndbuf;
		else {
			SOCKBUF_LOCK(&so->so_snd);
			if (so->so_snd.sb_flags & SB_AUTOSIZE &&
			    V_tcp_do_autosndbuf)
				cp->sndbuf = 256 * 1024;
			else
				cp->sndbuf = so->so_snd.sb_hiwat;
			SOCKBUF_UNLOCK(&so->so_snd);
		}
	}

	cp->l2t_idx = l2t_idx;

	/* This will be initialized on ESTABLISHED. */
	cp->emss = 0;
}

int
negative_advice(int status)
{

	return (status == CPL_ERR_RTX_NEG_ADVICE ||
	    status == CPL_ERR_PERSIST_NEG_ADVICE ||
	    status == CPL_ERR_KEEPALV_NEG_ADVICE);
}

static int
alloc_tid_tab(struct tid_info *t, int flags)
{

	MPASS(t->ntids > 0);
	MPASS(t->tid_tab == NULL);

	t->tid_tab = malloc(t->ntids * sizeof(*t->tid_tab), M_CXGBE,
	    M_ZERO | flags);
	if (t->tid_tab == NULL)
		return (ENOMEM);
	atomic_store_rel_int(&t->tids_in_use, 0);

	return (0);
}

static void
free_tid_tab(struct tid_info *t)
{

	KASSERT(t->tids_in_use == 0,
	    ("%s: %d tids still in use.", __func__, t->tids_in_use));

	free(t->tid_tab, M_CXGBE);
	t->tid_tab = NULL;
}

static int
alloc_stid_tab(struct tid_info *t, int flags)
{

	MPASS(t->nstids > 0);
	MPASS(t->stid_tab == NULL);

	t->stid_tab = malloc(t->nstids * sizeof(*t->stid_tab), M_CXGBE,
	    M_ZERO | flags);
	if (t->stid_tab == NULL)
		return (ENOMEM);
	mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF);
	t->stids_in_use = 0;
	TAILQ_INIT(&t->stids);
	t->nstids_free_head = t->nstids;

	return (0);
}

static void
free_stid_tab(struct tid_info *t)
{

	KASSERT(t->stids_in_use == 0,
	    ("%s: %d tids still in use.", __func__, t->stids_in_use));

	if (mtx_initialized(&t->stid_lock))
		mtx_destroy(&t->stid_lock);
	free(t->stid_tab, M_CXGBE);
	t->stid_tab = NULL;
}

static void
free_tid_tabs(struct tid_info *t)
{

	free_tid_tab(t);
	free_stid_tab(t);
}
static int
alloc_tid_tabs(struct tid_info *t)
{
	int rc;

	rc = alloc_tid_tab(t, M_NOWAIT);
	if (rc != 0)
		goto failed;

	rc = alloc_stid_tab(t, M_NOWAIT);
	if (rc != 0)
		goto failed;

	return (0);
failed:
	free_tid_tabs(t);
	return (rc);
}

static inline void
alloc_tcb_history(struct adapter *sc, struct tom_data *td)
{

	if (sc->tids.ntids == 0 || sc->tids.ntids > 1024)
		return;
	rw_init(&td->tcb_history_lock, "TCB history");
	td->tcb_history = malloc(sc->tids.ntids * sizeof(*td->tcb_history),
	    M_CXGBE, M_ZERO | M_NOWAIT);
	td->dupack_threshold = G_DUPACKTHRESH(t4_read_reg(sc, A_TP_PARA_REG0));
}

static inline void
free_tcb_history(struct adapter *sc, struct tom_data *td)
{
#ifdef INVARIANTS
	int i;

	if (td->tcb_history != NULL) {
		for (i = 0; i < sc->tids.ntids; i++) {
			MPASS(td->tcb_history[i] == NULL);
		}
	}
#endif
	free(td->tcb_history, M_CXGBE);
	if (rw_initialized(&td->tcb_history_lock))
		rw_destroy(&td->tcb_history_lock);
}

static void
free_tom_data(struct adapter *sc, struct tom_data *td)
{

	ASSERT_SYNCHRONIZED_OP(sc);

	KASSERT(TAILQ_EMPTY(&td->toep_list),
	    ("%s: TOE PCB list is not empty.", __func__));
	KASSERT(td->lctx_count == 0,
	    ("%s: lctx hash table is not empty.", __func__));

	t4_free_ppod_region(&td->pr);

	if (td->listen_mask != 0)
		hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);

	if (mtx_initialized(&td->unsent_wr_lock))
		mtx_destroy(&td->unsent_wr_lock);
	if (mtx_initialized(&td->lctx_hash_lock))
		mtx_destroy(&td->lctx_hash_lock);
	if (mtx_initialized(&td->toep_list_lock))
		mtx_destroy(&td->toep_list_lock);

	free_tcb_history(sc, td);
	free_tid_tabs(&sc->tids);
	free(td, M_CXGBE);
}
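/*
 * The offload policy below is expressed as BPF programs.  For active and
 * listening opens there is no real packet to match against, so
 * prepare_pkt() fabricates a minimal Ethernet/IP/TCP header from the inpcb
 * for the filter to run on; passive opens already carry real headers in the
 * CPL_PASS_ACCEPT_REQ payload.
 */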
static char *
prepare_pkt(int open_type, uint16_t vtag, struct inpcb *inp, int *pktlen,
    int *buflen)
{
	char *pkt;
	struct tcphdr *th;
	int ipv6, len;
	const int maxlen =
	    max(sizeof(struct ether_header), sizeof(struct ether_vlan_header)) +
	    max(sizeof(struct ip), sizeof(struct ip6_hdr)) +
	    sizeof(struct tcphdr);

	MPASS(open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN);

	pkt = malloc(maxlen, M_CXGBE, M_ZERO | M_NOWAIT);
	if (pkt == NULL)
		return (NULL);

	ipv6 = inp->inp_vflag & INP_IPV6;
	len = 0;

	if (EVL_VLANOFTAG(vtag) == 0xfff) {
		struct ether_header *eh = (void *)pkt;

		if (ipv6)
			eh->ether_type = htons(ETHERTYPE_IPV6);
		else
			eh->ether_type = htons(ETHERTYPE_IP);

		len += sizeof(*eh);
	} else {
		struct ether_vlan_header *evh = (void *)pkt;

		evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evh->evl_tag = htons(vtag);
		if (ipv6)
			evh->evl_proto = htons(ETHERTYPE_IPV6);
		else
			evh->evl_proto = htons(ETHERTYPE_IP);

		len += sizeof(*evh);
	}

	if (ipv6) {
		struct ip6_hdr *ip6 = (void *)&pkt[len];

		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_plen = htons(sizeof(struct tcphdr));
		ip6->ip6_nxt = IPPROTO_TCP;
		if (open_type == OPEN_TYPE_ACTIVE) {
			ip6->ip6_src = inp->in6p_laddr;
			ip6->ip6_dst = inp->in6p_faddr;
		} else if (open_type == OPEN_TYPE_LISTEN) {
			ip6->ip6_src = inp->in6p_laddr;
			ip6->ip6_dst = ip6->ip6_src;
		}

		len += sizeof(*ip6);
	} else {
		struct ip *ip = (void *)&pkt[len];

		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(*ip) >> 2;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr));
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_p = IPPROTO_TCP;
		if (open_type == OPEN_TYPE_ACTIVE) {
			ip->ip_src = inp->inp_laddr;
			ip->ip_dst = inp->inp_faddr;
		} else if (open_type == OPEN_TYPE_LISTEN) {
			ip->ip_src = inp->inp_laddr;
			ip->ip_dst = ip->ip_src;
		}

		len += sizeof(*ip);
	}

	th = (void *)&pkt[len];
	if (open_type == OPEN_TYPE_ACTIVE) {
		th->th_sport = inp->inp_lport;	/* network byte order already */
		th->th_dport = inp->inp_fport;	/* ditto */
	} else if (open_type == OPEN_TYPE_LISTEN) {
		th->th_sport = inp->inp_lport;	/* network byte order already */
		th->th_dport = th->th_sport;
	}
	len += sizeof(*th);

	*pktlen = *buflen = len;
	return (pkt);
}

const struct offload_settings *
lookup_offload_policy(struct adapter *sc, int open_type, struct mbuf *m,
    uint16_t vtag, struct inpcb *inp)
{
	const struct t4_offload_policy *op;
	char *pkt;
	struct offload_rule *r;
	int i, matched, pktlen, buflen;
	static const struct offload_settings allow_offloading_settings = {
		.offload = 1,
		.rx_coalesce = -1,
		.cong_algo = -1,
		.sched_class = -1,
		.tstamp = -1,
		.sack = -1,
		.nagle = -1,
		.ecn = -1,
		.ddp = -1,
		.tls = -1,
		.txq = -1,
		.rxq = -1,
		.mss = -1,
	};
	static const struct offload_settings disallow_offloading_settings = {
		.offload = 0,
		/* rest is irrelevant when offload is off. */
	};

	rw_assert(&sc->policy_lock, RA_LOCKED);

	/*
	 * If there's no Connection Offloading Policy attached to the device
	 * then we need to return a default static policy.  If
	 * "cop_managed_offloading" is true, then we need to disallow
	 * offloading until a COP is attached to the device.  Otherwise we
	 * allow offloading ...
	 */
	op = sc->policy;
	if (op == NULL) {
		if (sc->tt.cop_managed_offloading)
			return (&disallow_offloading_settings);
		else
			return (&allow_offloading_settings);
	}

	switch (open_type) {
	case OPEN_TYPE_ACTIVE:
	case OPEN_TYPE_LISTEN:
		pkt = prepare_pkt(open_type, vtag, inp, &pktlen, &buflen);
		break;
	case OPEN_TYPE_PASSIVE:
		MPASS(m != NULL);
		pkt = mtod(m, char *);
		MPASS(*pkt == CPL_PASS_ACCEPT_REQ);
		pkt += sizeof(struct cpl_pass_accept_req);
		pktlen = m->m_pkthdr.len - sizeof(struct cpl_pass_accept_req);
		buflen = m->m_len - sizeof(struct cpl_pass_accept_req);
		break;
	default:
		MPASS(0);
		return (&disallow_offloading_settings);
	}

	if (pkt == NULL || pktlen == 0 || buflen == 0)
		return (&disallow_offloading_settings);

	matched = 0;
	r = &op->rule[0];
	for (i = 0; i < op->nrules; i++, r++) {
		if (r->open_type != open_type &&
		    r->open_type != OPEN_TYPE_DONTCARE) {
			continue;
		}
		matched = bpf_filter(r->bpf_prog.bf_insns, pkt, pktlen, buflen);
		if (matched)
			break;
	}

	if (open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN)
		free(pkt, M_CXGBE);

	return (matched ? &r->settings : &disallow_offloading_settings);
}
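/*
 * reclaim_wr_resources() below runs from a taskqueue and disposes of work
 * requests whose L2 resolution failed: ACT_OPEN_REQ[6] and PASS_ACCEPT_RPL
 * have their connection state unwound, while anything unexpected is logged
 * and deliberately leaked so it can be examined with a debugger.
 */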
static void
reclaim_wr_resources(void *arg, int count)
{
	struct tom_data *td = arg;
	STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list);
	struct cpl_act_open_req *cpl;
	u_int opcode, atid, tid;
	struct wrqe *wr;
	struct adapter *sc = td_adapter(td);

	mtx_lock(&td->unsent_wr_lock);
	STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
	mtx_unlock(&td->unsent_wr_lock);

	while ((wr = STAILQ_FIRST(&twr_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&twr_list, link);

		cpl = wrtod(wr);
		opcode = GET_OPCODE(cpl);

		switch (opcode) {
		case CPL_ACT_OPEN_REQ:
		case CPL_ACT_OPEN_REQ6:
			atid = G_TID_TID(be32toh(OPCODE_TID(cpl)));
			CTR2(KTR_CXGBE, "%s: atid %u ", __func__, atid);
			act_open_failure_cleanup(sc, atid, EHOSTUNREACH);
			free(wr, M_CXGBE);
			break;
		case CPL_PASS_ACCEPT_RPL:
			tid = GET_TID(cpl);
			CTR2(KTR_CXGBE, "%s: tid %u ", __func__, tid);
			synack_failure_cleanup(sc, tid);
			free(wr, M_CXGBE);
			break;
		default:
			log(LOG_ERR, "%s: leaked work request %p, wr_len %d, "
			    "opcode %x\n", __func__, wr, wr->wr_len, opcode);
			/* WR not freed here; go look at it with a debugger. */
		}
	}
}

/*
 * Ground control to Major TOM
 * Commencing countdown, engines on
 */
static int
t4_tom_activate(struct adapter *sc)
{
	struct tom_data *td;
	struct toedev *tod;
	struct vi_info *vi;
	int i, rc, v;

	ASSERT_SYNCHRONIZED_OP(sc);

	/* per-adapter softc for TOM */
	td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT);
	if (td == NULL)
		return (ENOMEM);

	/* List of TOE PCBs and associated lock */
	mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
	TAILQ_INIT(&td->toep_list);

	/* Listen context */
	mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
	td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
	    &td->listen_mask, HASH_NOWAIT);

	/* List of WRs for which L2 resolution failed */
	mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF);
	STAILQ_INIT(&td->unsent_wr_list);
	TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td);

	/* TID tables */
	rc = alloc_tid_tabs(&sc->tids);
	if (rc != 0)
		goto done;

	rc = t4_init_ppod_region(&td->pr, &sc->vres.ddp,
	    t4_read_reg(sc, A_ULP_RX_TDDP_PSZ), "TDDP page pods");
	if (rc != 0)
		goto done;
	t4_set_reg_field(sc, A_ULP_RX_TDDP_TAGMASK,
	    V_TDDPTAGMASK(M_TDDPTAGMASK), td->pr.pr_tag_mask);

	alloc_tcb_history(sc, td);

	/* toedev ops */
	tod = &td->tod;
	init_toedev(tod);
	tod->tod_softc = sc;
	tod->tod_connect = t4_connect;
	tod->tod_listen_start = t4_listen_start;
	tod->tod_listen_stop = t4_listen_stop;
	tod->tod_rcvd = t4_rcvd;
	tod->tod_output = t4_tod_output;
	tod->tod_send_rst = t4_send_rst;
	tod->tod_send_fin = t4_send_fin;
	tod->tod_pcb_detach = t4_pcb_detach;
	tod->tod_l2_update = t4_l2_update;
	tod->tod_syncache_added = t4_syncache_added;
	tod->tod_syncache_removed = t4_syncache_removed;
	tod->tod_syncache_respond = t4_syncache_respond;
	tod->tod_offload_socket = t4_offload_socket;
	tod->tod_ctloutput = t4_ctloutput;
	tod->tod_tcp_info = t4_tcp_info;
#ifdef KERN_TLS
	tod->tod_alloc_tls_session = t4_alloc_tls_session;
#endif
	tod->tod_pmtu_update = t4_pmtu_update;

	for_each_port(sc, i) {
		for_each_vi(sc->port[i], v, vi) {
			TOEDEV(vi->ifp) = &td->tod;
		}
	}

	sc->tom_softc = td;
	register_toedev(sc->tom_softc);

done:
	if (rc != 0)
		free_tom_data(sc, td);
	return (rc);
}

static int
t4_tom_deactivate(struct adapter *sc)
{
	int rc = 0;
	struct tom_data *td = sc->tom_softc;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (td == NULL)
		return (0);	/* XXX. KASSERT? */

	if (sc->offload_map != 0)
		return (EBUSY);	/* at least one port has IFCAP_TOE enabled */

	if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI))
		return (EBUSY);	/* both iWARP and iSCSI rely on the TOE. */

	mtx_lock(&td->toep_list_lock);
	if (!TAILQ_EMPTY(&td->toep_list))
		rc = EBUSY;
	mtx_unlock(&td->toep_list_lock);

	mtx_lock(&td->lctx_hash_lock);
	if (td->lctx_count > 0)
		rc = EBUSY;
	mtx_unlock(&td->lctx_hash_lock);

	taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
	mtx_lock(&td->unsent_wr_lock);
	if (!STAILQ_EMPTY(&td->unsent_wr_list))
		rc = EBUSY;
	mtx_unlock(&td->unsent_wr_lock);

	if (rc == 0) {
		unregister_toedev(sc->tom_softc);
		free_tom_data(sc, td);
		sc->tom_softc = NULL;
	}

	return (rc);
}

static int
t4_aio_queue_tom(struct socket *so, struct kaiocb *job)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	int error;

	if (ulp_mode(toep) == ULP_MODE_TCPDDP) {
		error = t4_aio_queue_ddp(so, job);
		if (error != EOPNOTSUPP)
			return (error);
	}

	return (t4_aio_queue_aiotx(so, job));
}

static int
t4_ctloutput_tom(struct socket *so, struct sockopt *sopt)
{

	if (sopt->sopt_level != IPPROTO_TCP)
		return (tcp_ctloutput(so, sopt));

	switch (sopt->sopt_name) {
	case TCP_TLSOM_SET_TLS_CONTEXT:
	case TCP_TLSOM_GET_TLS_TOM:
	case TCP_TLSOM_CLR_TLS_TOM:
	case TCP_TLSOM_CLR_QUIES:
		return (t4_ctloutput_tls(so, sopt));
	default:
		return (tcp_ctloutput(so, sopt));
	}
}

static int
t4_tom_mod_load(void)
{
	/* CPL handlers */
	t4_register_cpl_handler(CPL_GET_TCB_RPL, do_get_tcb_rpl);
	t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl2,
	    CPL_COOKIE_TOM);
	t4_init_connect_cpl_handlers();
	t4_init_listen_cpl_handlers();
	t4_init_cpl_io_handlers();

	t4_ddp_mod_load();
	t4_tls_mod_load();

	tcp_protosw = pffindproto(PF_INET, IPPROTO_TCP, SOCK_STREAM);
	if (tcp_protosw == NULL)
		return (ENOPROTOOPT);
	bcopy(tcp_protosw, &toe_protosw, sizeof(toe_protosw));
	bcopy(tcp_protosw->pr_usrreqs, &toe_usrreqs, sizeof(toe_usrreqs));
	toe_usrreqs.pru_aio_queue = t4_aio_queue_tom;
	toe_protosw.pr_ctloutput = t4_ctloutput_tom;
	toe_protosw.pr_usrreqs = &toe_usrreqs;

	tcp6_protosw = pffindproto(PF_INET6, IPPROTO_TCP, SOCK_STREAM);
	if (tcp6_protosw == NULL)
		return (ENOPROTOOPT);
	bcopy(tcp6_protosw, &toe6_protosw, sizeof(toe6_protosw));
	bcopy(tcp6_protosw->pr_usrreqs, &toe6_usrreqs, sizeof(toe6_usrreqs));
	toe6_usrreqs.pru_aio_queue = t4_aio_queue_tom;
	toe6_protosw.pr_ctloutput = t4_ctloutput_tom;
	toe6_protosw.pr_usrreqs = &toe6_usrreqs;

	return (t4_register_uld(&tom_uld_info));
}
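/*
 * The protosw cloning in t4_tom_mod_load() is what routes socket calls to
 * the TOE: the copies keep every stock TCP entry point but override
 * pr_ctloutput and pru_aio_queue, and offload_socket() / restore_so_proto()
 * swap so_proto between the stock and TOE versions when a connection moves
 * on or off the card.
 */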
static void
tom_uninit(struct adapter *sc, void *arg __unused)
{
	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun"))
		return;

	/* Try to free resources (works only if no port has IFCAP_TOE) */
	if (uld_active(sc, ULD_TOM))
		t4_deactivate_uld(sc, ULD_TOM);

	end_synchronized_op(sc, 0);
}

static int
t4_tom_mod_unload(void)
{
	t4_iterate(tom_uninit, NULL);

	if (t4_unregister_uld(&tom_uld_info) == EBUSY)
		return (EBUSY);

	t4_tls_mod_unload();
	t4_ddp_mod_unload();

	t4_uninit_connect_cpl_handlers();
	t4_uninit_listen_cpl_handlers();
	t4_uninit_cpl_io_handlers();
	t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, NULL, CPL_COOKIE_TOM);
	t4_register_cpl_handler(CPL_GET_TCB_RPL, NULL);

	return (0);
}
#endif	/* TCP_OFFLOAD */

static int
t4_tom_modevent(module_t mod, int cmd, void *arg)
{
	int rc = 0;

#ifdef TCP_OFFLOAD
	switch (cmd) {
	case MOD_LOAD:
		rc = t4_tom_mod_load();
		break;

	case MOD_UNLOAD:
		rc = t4_tom_mod_unload();
		break;

	default:
		rc = EINVAL;
	}
#else
	printf("t4_tom: compiled without TCP_OFFLOAD support.\n");
	rc = EOPNOTSUPP;
#endif
	return (rc);
}

static moduledata_t t4_tom_moddata = {
	"t4_tom",
	t4_tom_modevent,
	0
};

MODULE_VERSION(t4_tom, 1);
MODULE_DEPEND(t4_tom, toecore, 1, 1, 1);
MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1);
DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY);