/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/refcount.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/scope6_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"
#include "tom/t4_tls.h"

static struct protosw toe_protosw;
static struct pr_usrreqs toe_usrreqs;

static struct protosw toe6_protosw;
static struct pr_usrreqs toe6_usrreqs;

/* Module ops */
static int t4_tom_mod_load(void);
static int t4_tom_mod_unload(void);
static int t4_tom_modevent(module_t, int, void *);

/* ULD ops and helpers */
static int t4_tom_activate(struct adapter *);
static int t4_tom_deactivate(struct adapter *);

static struct uld_info tom_uld_info = {
	.uld_id = ULD_TOM,
	.activate = t4_tom_activate,
	.deactivate = t4_tom_deactivate,
};

static void queue_tid_release(struct adapter *, int);
static void release_offload_resources(struct toepcb *);
static int alloc_tid_tabs(struct tid_info *);
static void free_tid_tabs(struct tid_info *);
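/*
 * CLIP (Compressed Local IP) table helpers for IPv6 offload.  The hardware
 * must have each local IPv6 address programmed into its CLIP table before it
 * can handle offloaded connections to that address; the routines below
 * program the table via FW_CLIP_CMD and keep the driver's cached copy in
 * sync with the kernel's address lists.
 */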
static int add_lip(struct adapter *, struct in6_addr *);
static int delete_lip(struct adapter *, struct in6_addr *);
static struct clip_entry *search_lip(struct tom_data *, struct in6_addr *);
static void init_clip_table(struct adapter *, struct tom_data *);
static void update_clip(struct adapter *, void *);
static void t4_clip_task(void *, int);
static void update_clip_table(struct adapter *, struct tom_data *);
static void destroy_clip_table(struct adapter *, struct tom_data *);
static void free_tom_data(struct adapter *, struct tom_data *);
static void reclaim_wr_resources(void *, int);

static int in6_ifaddr_gen;
static eventhandler_tag ifaddr_evhandler;
static struct timeout_task clip_task;

struct toepcb *
alloc_toepcb(struct vi_info *vi, int txqid, int rxqid, int flags)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct toepcb *toep;
	int tx_credits, txsd_total, len;

	/*
	 * The firmware counts tx work request credits in units of 16 bytes
	 * each.  Reserve room for an ABORT_REQ so the driver never has to
	 * worry about tx credits if it wants to abort a connection.
	 */
	tx_credits = sc->params.ofldq_wr_cred;
	tx_credits -= howmany(sizeof(struct cpl_abort_req), 16);

	/*
	 * Shortest possible tx work request is a fw_ofld_tx_data_wr + 1 byte
	 * immediate payload, and firmware counts tx work request credits in
	 * units of 16 bytes.  Calculate the maximum number of work requests
	 * possible.
	 */
	txsd_total = tx_credits /
	    howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16);

	if (txqid < 0)
		txqid = (arc4random() % vi->nofldtxq) + vi->first_ofld_txq;
	KASSERT(txqid >= vi->first_ofld_txq &&
	    txqid < vi->first_ofld_txq + vi->nofldtxq,
	    ("%s: txqid %d for vi %p (first %d, n %d)", __func__, txqid, vi,
	    vi->first_ofld_txq, vi->nofldtxq));

	if (rxqid < 0)
		rxqid = (arc4random() % vi->nofldrxq) + vi->first_ofld_rxq;
	KASSERT(rxqid >= vi->first_ofld_rxq &&
	    rxqid < vi->first_ofld_rxq + vi->nofldrxq,
	    ("%s: rxqid %d for vi %p (first %d, n %d)", __func__, rxqid, vi,
	    vi->first_ofld_rxq, vi->nofldrxq));

	len = offsetof(struct toepcb, txsd) +
	    txsd_total * sizeof(struct ofld_tx_sdesc);

	toep = malloc(len, M_CXGBE, M_ZERO | flags);
	if (toep == NULL)
		return (NULL);

	refcount_init(&toep->refcount, 1);
	toep->td = sc->tom_softc;
	toep->vi = vi;
	toep->tc_idx = -1;
	toep->tx_total = tx_credits;
	toep->tx_credits = tx_credits;
	toep->ofld_txq = &sc->sge.ofld_txq[txqid];
	toep->ofld_rxq = &sc->sge.ofld_rxq[rxqid];
	toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
	mbufq_init(&toep->ulp_pduq, INT_MAX);
	mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX);
	toep->txsd_total = txsd_total;
	toep->txsd_avail = txsd_total;
	toep->txsd_pidx = 0;
	toep->txsd_cidx = 0;
	aiotx_init_toep(toep);

	return (toep);
}

struct toepcb *
hold_toepcb(struct toepcb *toep)
{

	refcount_acquire(&toep->refcount);
	return (toep);
}

void
free_toepcb(struct toepcb *toep)
{

	if (refcount_release(&toep->refcount) == 0)
		return;

	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: attached to an inpcb", __func__));
	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: CPL pending", __func__));

	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		ddp_uninit_toep(toep);
	tls_uninit_toep(toep);
	free(toep, M_CXGBE);
}
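/*
 * A toepcb starts life with a single reference (from alloc_toepcb).
 * hold_toepcb takes an additional reference, free_toepcb drops one, and the
 * structure is actually freed only when the last reference is released.
 */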
/*
 * Set up the socket for TCP offload.
 */
void
offload_socket(struct socket *so, struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	/* Update socket */
	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	if (inp->inp_vflag & INP_IPV6)
		so->so_proto = &toe6_protosw;
	else
		so->so_proto = &toe_protosw;
	SOCKBUF_UNLOCK(sb);

	/* Update TCP PCB */
	tp->tod = &td->tod;
	tp->t_toe = toep;
	tp->t_flags |= TF_TOE;

	/* Install an extra hold on inp */
	toep->inp = inp;
	toep->flags |= TPF_ATTACHED;
	in_pcbref(inp);

	/* Add the TOE PCB to the active list */
	mtx_lock(&td->toep_list_lock);
	TAILQ_INSERT_HEAD(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

/* This is _not_ the normal way to "unoffload" a socket. */
void
undo_offload_socket(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;
	struct tom_data *td = toep->td;
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);

	tp->tod = NULL;
	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;

	toep->inp = NULL;
	toep->flags &= ~TPF_ATTACHED;
	if (in_pcbrele_wlocked(inp))
		panic("%s: inp freed.", __func__);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

static void
release_offload_resources(struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct adapter *sc = td_adapter(td);
	int tid = toep->tid;

	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: %p has CPL pending.", __func__, toep));
	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: %p is still attached.", __func__, toep));

	CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)",
	    __func__, toep, tid, toep->l2te, toep->ce);

	/*
	 * These queues should have been emptied at approximately the same time
	 * that a normal connection's socket's so_snd would have been purged or
	 * drained.  Do _not_ clean up here.
	 */
	MPASS(mbufq_len(&toep->ulp_pduq) == 0);
	MPASS(mbufq_len(&toep->ulp_pdu_reclaimq) == 0);
#ifdef INVARIANTS
	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		ddp_assert_empty(toep);
#endif

	if (toep->l2te)
		t4_l2t_release(toep->l2te);

	if (tid >= 0) {
		remove_tid(sc, tid, toep->ce ? 2 : 1);
		release_tid(sc, tid, toep->ctrlq);
	}

	if (toep->ce)
		release_lip(td, toep->ce);

#ifdef RATELIMIT
	if (toep->tc_idx != -1)
		t4_release_cl_rl_kbps(sc, toep->vi->pi->port_id, toep->tc_idx);
#endif
	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);

	free_toepcb(toep);
}
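/*
 * An offloaded connection's resources are released by whichever of
 * t4_pcb_detach and final_cpl_received runs last: each clears its own flag
 * (TPF_ATTACHED or TPF_CPL_PENDING) and calls release_offload_resources only
 * if the other flag is already clear.
 */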
/*
 * The kernel is done with the TCP PCB and this is our opportunity to unhook
 * the toepcb hanging off of it.  If the TOE driver is also done with the
 * toepcb (no pending CPL) then it is time to release all resources tied to
 * the toepcb.
 *
 * Also gets called when an offloaded active open fails and the TOM wants the
 * kernel to take the TCP PCB back.
 */
static void
t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
{
#if defined(KTR) || defined(INVARIANTS)
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);

	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
	KASSERT(toep->flags & TPF_ATTACHED,
	    ("%s: not attached", __func__));

#ifdef KTR
	if (tp->t_state == TCPS_SYN_SENT) {
		CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)",
		    __func__, toep->tid, toep, toep->flags, inp,
		    inp->inp_flags);
	} else {
		CTR6(KTR_CXGBE,
		    "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)",
		    toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp,
		    inp->inp_flags);
	}
#endif

	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;
	toep->flags &= ~TPF_ATTACHED;

	if (!(toep->flags & TPF_CPL_PENDING))
		release_offload_resources(toep);
}

/*
 * setsockopt handler.
 */
static void
t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = tp->t_toe;

	if (dir == SOPT_GET)
		return;

	CTR4(KTR_CXGBE, "%s: tp %p, dir %u, name %u", __func__, tp, dir, name);

	switch (name) {
	case TCP_NODELAY:
		if (tp->t_state != TCPS_ESTABLISHED)
			break;
		t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
		    V_TF_NAGLE(1), V_TF_NAGLE(tp->t_flags & TF_NODELAY ? 0 : 1),
		    0, 0);
		break;
	default:
		break;
	}
}
/*
 * The TOE driver will not receive any more CPLs for the tid associated with
 * the toepcb; release the hold on the inpcb.
 */
void
final_cpl_received(struct toepcb *toep)
{
	struct inpcb *inp = toep->inp;

	KASSERT(inp != NULL, ("%s: inp is NULL", __func__));
	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_CPL_PENDING,
	    ("%s: CPL not pending already?", __func__));

	CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)",
	    __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags);

	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		release_ddp_resources(toep);
	toep->inp = NULL;
	toep->flags &= ~TPF_CPL_PENDING;
	mbufq_drain(&toep->ulp_pdu_reclaimq);

	if (!(toep->flags & TPF_ATTACHED))
		release_offload_resources(toep);

	if (!in_pcbrele_wlocked(inp))
		INP_WUNLOCK(inp);
}

void
insert_tid(struct adapter *sc, int tid, void *ctx, int ntids)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid] = ctx;
	atomic_add_int(&t->tids_in_use, ntids);
}

void *
lookup_tid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;

	return (t->tid_tab[tid]);
}

void
update_tid(struct adapter *sc, int tid, void *ctx)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid] = ctx;
}

void
remove_tid(struct adapter *sc, int tid, int ntids)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid] = NULL;
	atomic_subtract_int(&t->tids_in_use, ntids);
}

void
release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
{
	struct wrqe *wr;
	struct cpl_tid_release *req;

	wr = alloc_wrqe(sizeof(*req), ctrlq);
	if (wr == NULL) {
		queue_tid_release(sc, tid);	/* defer */
		return;
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);

	t4_wrq_tx(sc, wr);
}

static void
queue_tid_release(struct adapter *sc, int tid)
{

	CXGBE_UNIMPLEMENTED("deferred tid release");
}

/*
 * What mtu_idx to use, given a 4-tuple and/or an MSS cap.
 */
int
find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc, int pmss)
{
	unsigned short *mtus = &sc->params.mtus[0];
	int i, mss, n;

	KASSERT(inc != NULL || pmss > 0,
	    ("%s: at least one of inc/pmss must be specified", __func__));

	mss = inc ? tcp_mssopt(inc) : pmss;
	if (pmss > 0 && mss > pmss)
		mss = pmss;

	/* inc may be NULL if the caller supplied only an MSS cap. */
	if (inc != NULL && (inc->inc_flags & INC_ISIPV6))
		n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		n = sizeof(struct ip) + sizeof(struct tcphdr);

	for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mss + n; i++)
		continue;

	return (i);
}

/*
 * Determine the receive window size for a socket.
 */
u_long
select_rcv_wnd(struct socket *so)
{
	unsigned long wnd;

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	wnd = sbspace(&so->so_rcv);
	if (wnd < MIN_RCV_WND)
		wnd = MIN_RCV_WND;

	return (min(wnd, MAX_RCV_WND));
}

int
select_rcv_wscale(void)
{
	int wscale = 0;
	unsigned long space = sb_max;

	if (space > MAX_RCV_WND)
		space = MAX_RCV_WND;

	while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space)
		wscale++;

	return (wscale);
}
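/*
 * Example: with kern.ipc.maxsockbuf (sb_max) at its usual 2MB default, and
 * MAX_RCV_WND no smaller than that, the loop above settles on a wscale of 6:
 * 65535 << 5 still falls just short of 2MB, while 65535 << 6 does not.
 */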
/*
 * The socket "so" could be a listening socket too.
 */
uint64_t
calc_opt0(struct socket *so, struct vi_info *vi, struct l2t_entry *e,
    int mtu_idx, int rscale, int rx_credits, int ulp_mode)
{
	uint64_t opt0;

	KASSERT(rx_credits <= M_RCV_BUFSIZ,
	    ("%s: rcv_bufsiz too high", __func__));

	opt0 = F_TCAM_BYPASS | V_WND_SCALE(rscale) | V_MSS_IDX(mtu_idx) |
	    V_ULP_MODE(ulp_mode) | V_RCV_BUFSIZ(rx_credits);

	if (so != NULL) {
		struct inpcb *inp = sotoinpcb(so);
		struct tcpcb *tp = intotcpcb(inp);
		int keepalive = tcp_always_keepalive ||
		    so_options_get(so) & SO_KEEPALIVE;

		opt0 |= V_NAGLE((tp->t_flags & TF_NODELAY) == 0);
		opt0 |= V_KEEP_ALIVE(keepalive != 0);
	}

	if (e != NULL)
		opt0 |= V_L2T_IDX(e->idx);

	if (vi != NULL) {
		opt0 |= V_SMAC_SEL(vi->smt_idx);
		opt0 |= V_TX_CHAN(vi->pi->tx_chan);
	}

	return (htobe64(opt0));
}

uint64_t
select_ntuple(struct vi_info *vi, struct l2t_entry *e)
{
	struct adapter *sc = vi->pi->adapter;
	struct tp_params *tp = &sc->params.tp;
	uint16_t viid = vi->viid;
	uint64_t ntuple = 0;

	/*
	 * Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && e->vlan != CPL_L2T_VLAN_NONE)
		ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;

	if (tp->port_shift >= 0)
		ntuple |= (uint64_t)e->lport << tp->port_shift;

	if (tp->protocol_shift >= 0)
		ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;

	if (tp->vnic_shift >= 0) {
		uint32_t vf = G_FW_VIID_VIN(viid);
		uint32_t pf = G_FW_VIID_PFN(viid);
		uint32_t vld = G_FW_VIID_VIVLD(viid);

		ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vf) | V_FT_VNID_ID_PF(pf) |
		    V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
	}

	if (is_t4(sc))
		return (htobe32((uint32_t)ntuple));
	else
		return (htobe64(V_FILTER_TUPLE(ntuple)));
}
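/*
 * A connection is treated as a TLS socket if either its local or its foreign
 * port matches one of the ports listed in the adapter's tt.tls_rx_ports.
 */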
static int
is_tls_sock(struct socket *so, struct adapter *sc)
{
	struct inpcb *inp = sotoinpcb(so);
	int i, rc;

	/* XXX: Eventually add a SO_WANT_TLS socket option perhaps? */
	rc = 0;
	ADAPTER_LOCK(sc);
	for (i = 0; i < sc->tt.num_tls_rx_ports; i++) {
		if (inp->inp_lport == htons(sc->tt.tls_rx_ports[i]) ||
		    inp->inp_fport == htons(sc->tt.tls_rx_ports[i])) {
			rc = 1;
			break;
		}
	}
	ADAPTER_UNLOCK(sc);
	return (rc);
}

int
select_ulp_mode(struct socket *so, struct adapter *sc)
{

	if (can_tls_offload(sc) && is_tls_sock(so, sc))
		return (ULP_MODE_TLS);
	else if (sc->tt.ddp && (so->so_options & SO_NO_DDP) == 0)
		return (ULP_MODE_TCPDDP);
	else
		return (ULP_MODE_NONE);
}

void
set_ulp_mode(struct toepcb *toep, int ulp_mode)
{

	CTR4(KTR_CXGBE, "%s: toep %p (tid %d) ulp_mode %d",
	    __func__, toep, toep->tid, ulp_mode);
	toep->ulp_mode = ulp_mode;
	tls_init_toep(toep);
	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		ddp_init_toep(toep);
}

int
negative_advice(int status)
{

	return (status == CPL_ERR_RTX_NEG_ADVICE ||
	    status == CPL_ERR_PERSIST_NEG_ADVICE ||
	    status == CPL_ERR_KEEPALV_NEG_ADVICE);
}
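/*
 * The three TID tables (regular tids, atids for active opens, and stids for
 * servers) are carved out of a single contiguous allocation; only tid_tab is
 * malloc'd and freed, with atid_tab and stid_tab pointing into it.
 */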
static int
alloc_tid_tabs(struct tid_info *t)
{
	size_t size;
	unsigned int i;

	size = t->ntids * sizeof(*t->tid_tab) +
	    t->natids * sizeof(*t->atid_tab) +
	    t->nstids * sizeof(*t->stid_tab);

	t->tid_tab = malloc(size, M_CXGBE, M_ZERO | M_NOWAIT);
	if (t->tid_tab == NULL)
		return (ENOMEM);

	mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->afree = t->atid_tab;
	t->atids_in_use = 0;
	for (i = 1; i < t->natids; i++)
		t->atid_tab[i - 1].next = &t->atid_tab[i];
	if (t->natids > 0)
		t->atid_tab[t->natids - 1].next = NULL;

	mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF);
	t->stid_tab = (struct listen_ctx **)&t->atid_tab[t->natids];
	t->stids_in_use = 0;
	TAILQ_INIT(&t->stids);
	t->nstids_free_head = t->nstids;

	atomic_store_rel_int(&t->tids_in_use, 0);

	return (0);
}

static void
free_tid_tabs(struct tid_info *t)
{
	KASSERT(t->tids_in_use == 0,
	    ("%s: %d tids still in use.", __func__, t->tids_in_use));
	KASSERT(t->atids_in_use == 0,
	    ("%s: %d atids still in use.", __func__, t->atids_in_use));
	KASSERT(t->stids_in_use == 0,
	    ("%s: %d stids still in use.", __func__, t->stids_in_use));

	free(t->tid_tab, M_CXGBE);
	t->tid_tab = NULL;

	if (mtx_initialized(&t->atid_lock))
		mtx_destroy(&t->atid_lock);
	if (mtx_initialized(&t->stid_lock))
		mtx_destroy(&t->stid_lock);
}

static int
add_lip(struct adapter *sc, struct in6_addr *lip)
{
	struct fw_clip_cmd c;

	ASSERT_SYNCHRONIZED_OP(sc);
	/* mtx_assert(&td->clip_table_lock, MA_OWNED); */

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(V_FW_CMD_OP(FW_CLIP_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE);
	c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
	c.ip_hi = *(uint64_t *)&lip->s6_addr[0];
	c.ip_lo = *(uint64_t *)&lip->s6_addr[8];

	return (-t4_wr_mbox_ns(sc, sc->mbox, &c, sizeof(c), &c));
}

static int
delete_lip(struct adapter *sc, struct in6_addr *lip)
{
	struct fw_clip_cmd c;

	ASSERT_SYNCHRONIZED_OP(sc);
	/* mtx_assert(&td->clip_table_lock, MA_OWNED); */

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(V_FW_CMD_OP(FW_CLIP_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_READ);
	c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
	c.ip_hi = *(uint64_t *)&lip->s6_addr[0];
	c.ip_lo = *(uint64_t *)&lip->s6_addr[8];

	return (-t4_wr_mbox_ns(sc, sc->mbox, &c, sizeof(c), &c));
}

static struct clip_entry *
search_lip(struct tom_data *td, struct in6_addr *lip)
{
	struct clip_entry *ce;

	mtx_assert(&td->clip_table_lock, MA_OWNED);

	TAILQ_FOREACH(ce, &td->clip_table, link) {
		if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip))
			return (ce);
	}

	return (NULL);
}

struct clip_entry *
hold_lip(struct tom_data *td, struct in6_addr *lip, struct clip_entry *ce)
{

	mtx_lock(&td->clip_table_lock);
	if (ce == NULL)
		ce = search_lip(td, lip);
	if (ce != NULL)
		ce->refcount++;
	mtx_unlock(&td->clip_table_lock);

	return (ce);
}

void
release_lip(struct tom_data *td, struct clip_entry *ce)
{

	mtx_lock(&td->clip_table_lock);
	KASSERT(search_lip(td, &ce->lip) == ce,
	    ("%s: CLIP entry %p not in CLIP table.", __func__, ce));
	KASSERT(ce->refcount > 0,
	    ("%s: CLIP entry %p has refcount 0", __func__, ce));
	--ce->refcount;
	mtx_unlock(&td->clip_table_lock);
}

static void
init_clip_table(struct adapter *sc, struct tom_data *td)
{

	ASSERT_SYNCHRONIZED_OP(sc);

	mtx_init(&td->clip_table_lock, "CLIP table lock", NULL, MTX_DEF);
	TAILQ_INIT(&td->clip_table);
	td->clip_gen = -1;

	update_clip_table(sc, td);
}

static void
update_clip(struct adapter *sc, void *arg __unused)
{

	if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4tomuc"))
		return;

	if (uld_active(sc, ULD_TOM))
		update_clip_table(sc, sc->tom_softc);

	end_synchronized_op(sc, LOCK_HELD);
}

static void
t4_clip_task(void *arg, int count)
{

	t4_iterate(update_clip, NULL);
}
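/*
 * Reconcile the driver's CLIP cache with the kernel's IPv6 address lists.
 * All cached entries are moved to a local "stale" list first; addresses that
 * are still configured move back to the main list, new addresses are
 * installed in hardware, and stale entries with no remaining references are
 * deleted.
 */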
static void
update_clip_table(struct adapter *sc, struct tom_data *td)
{
	struct rm_priotracker in6_ifa_tracker;
	struct in6_ifaddr *ia;
	struct in6_addr *lip, tlip;
	struct clip_head stale;
	struct clip_entry *ce, *ce_temp;
	struct vi_info *vi;
	int rc, gen, i, j;
	uintptr_t last_vnet;

	ASSERT_SYNCHRONIZED_OP(sc);

	IN6_IFADDR_RLOCK(&in6_ifa_tracker);
	mtx_lock(&td->clip_table_lock);

	gen = atomic_load_acq_int(&in6_ifaddr_gen);
	if (gen == td->clip_gen)
		goto done;

	TAILQ_INIT(&stale);
	TAILQ_CONCAT(&stale, &td->clip_table, link);

	/*
	 * last_vnet optimizes the common cases where all if_vnet = NULL (no
	 * VIMAGE) or all if_vnet = vnet0.
	 */
	last_vnet = (uintptr_t)(-1);
	for_each_port(sc, i)
	for_each_vi(sc->port[i], j, vi) {
		if (last_vnet == (uintptr_t)vi->ifp->if_vnet)
			continue;

		/* XXX: races with if_vmove */
		CURVNET_SET(vi->ifp->if_vnet);
		TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
			lip = &ia->ia_addr.sin6_addr;

			KASSERT(!IN6_IS_ADDR_MULTICAST(lip),
			    ("%s: mcast address in in6_ifaddr list", __func__));

			if (IN6_IS_ADDR_LOOPBACK(lip))
				continue;
			if (IN6_IS_SCOPE_EMBED(lip)) {
				/* Remove the embedded scope */
				tlip = *lip;
				lip = &tlip;
				in6_clearscope(lip);
			}
			/*
			 * XXX: how to weed out the link local address for the
			 * loopback interface?  It's fe80::1 usually (always?).
			 */

			/*
			 * If it's in the main list then we already know it's
			 * not stale.
			 */
			TAILQ_FOREACH(ce, &td->clip_table, link) {
				if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip))
					goto next;
			}

			/*
			 * If it's in the stale list we should move it to the
			 * main list.
			 */
			TAILQ_FOREACH(ce, &stale, link) {
				if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip)) {
					TAILQ_REMOVE(&stale, ce, link);
					TAILQ_INSERT_TAIL(&td->clip_table, ce,
					    link);
					goto next;
				}
			}

			/* A new IP6 address; add it to the CLIP table. */
			ce = malloc(sizeof(*ce), M_CXGBE, M_NOWAIT);
			if (ce == NULL) {
				/* Lost until the next address change. */
				goto next;
			}
			memcpy(&ce->lip, lip, sizeof(ce->lip));
			ce->refcount = 0;
			rc = add_lip(sc, lip);
			if (rc == 0)
				TAILQ_INSERT_TAIL(&td->clip_table, ce, link);
			else {
				char ip[INET6_ADDRSTRLEN];

				inet_ntop(AF_INET6, &ce->lip, &ip[0],
				    sizeof(ip));
				log(LOG_ERR, "%s: could not add %s (%d)\n",
				    __func__, ip, rc);
				free(ce, M_CXGBE);
			}
next:
			continue;
		}
		CURVNET_RESTORE();
		last_vnet = (uintptr_t)vi->ifp->if_vnet;
	}

	/*
	 * Remove stale addresses (those no longer in V_in6_ifaddrhead) that
	 * are no longer referenced by the driver.
	 */
	TAILQ_FOREACH_SAFE(ce, &stale, link, ce_temp) {
		if (ce->refcount == 0) {
			rc = delete_lip(sc, &ce->lip);
			if (rc == 0) {
				TAILQ_REMOVE(&stale, ce, link);
				free(ce, M_CXGBE);
			} else {
				char ip[INET6_ADDRSTRLEN];

				inet_ntop(AF_INET6, &ce->lip, &ip[0],
				    sizeof(ip));
				log(LOG_ERR, "%s: could not delete %s (%d)\n",
				    __func__, ip, rc);
			}
		}
	}
	/* The ones that are still referenced need to stay in the CLIP table. */
	TAILQ_CONCAT(&td->clip_table, &stale, link);

	td->clip_gen = gen;
done:
	mtx_unlock(&td->clip_table_lock);
	IN6_IFADDR_RUNLOCK(&in6_ifa_tracker);
}

static void
destroy_clip_table(struct adapter *sc, struct tom_data *td)
{
	struct clip_entry *ce, *ce_temp;

	if (mtx_initialized(&td->clip_table_lock)) {
		mtx_lock(&td->clip_table_lock);
		TAILQ_FOREACH_SAFE(ce, &td->clip_table, link, ce_temp) {
			KASSERT(ce->refcount == 0,
			    ("%s: CLIP entry %p still in use (%d)", __func__,
			    ce, ce->refcount));
			TAILQ_REMOVE(&td->clip_table, ce, link);
			delete_lip(sc, &ce->lip);
			free(ce, M_CXGBE);
		}
		mtx_unlock(&td->clip_table_lock);
		mtx_destroy(&td->clip_table_lock);
	}
}

static void
free_tom_data(struct adapter *sc, struct tom_data *td)
{

	ASSERT_SYNCHRONIZED_OP(sc);

	KASSERT(TAILQ_EMPTY(&td->toep_list),
	    ("%s: TOE PCB list is not empty.", __func__));
	KASSERT(td->lctx_count == 0,
	    ("%s: lctx hash table is not empty.", __func__));

	tls_free_kmap(td);
	t4_free_ppod_region(&td->pr);
	destroy_clip_table(sc, td);

	if (td->listen_mask != 0)
		hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);

	if (mtx_initialized(&td->unsent_wr_lock))
		mtx_destroy(&td->unsent_wr_lock);
	if (mtx_initialized(&td->lctx_hash_lock))
		mtx_destroy(&td->lctx_hash_lock);
	if (mtx_initialized(&td->toep_list_lock))
		mtx_destroy(&td->toep_list_lock);

	free_tid_tabs(&sc->tids);
	free(td, M_CXGBE);
}
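/*
 * Runs from a task to dispose of work requests that could not be sent
 * because L2 resolution failed.  Active open requests get their atids
 * cleaned up; any other work request is logged as a leak and deliberately
 * not freed so that it can be examined with a debugger.
 */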
static void
reclaim_wr_resources(void *arg, int count)
{
	struct tom_data *td = arg;
	STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list);
	struct cpl_act_open_req *cpl;
	u_int opcode, atid;
	struct wrqe *wr;
	struct adapter *sc;

	mtx_lock(&td->unsent_wr_lock);
	STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
	mtx_unlock(&td->unsent_wr_lock);

	while ((wr = STAILQ_FIRST(&twr_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&twr_list, link);

		cpl = wrtod(wr);
		opcode = GET_OPCODE(cpl);

		switch (opcode) {
		case CPL_ACT_OPEN_REQ:
		case CPL_ACT_OPEN_REQ6:
			atid = G_TID_TID(be32toh(OPCODE_TID(cpl)));
			sc = td_adapter(td);

			CTR2(KTR_CXGBE, "%s: atid %u ", __func__, atid);
			act_open_failure_cleanup(sc, atid, EHOSTUNREACH);
			free(wr, M_CXGBE);
			break;
		default:
			log(LOG_ERR, "%s: leaked work request %p, wr_len %d, "
			    "opcode %x\n", __func__, wr, wr->wr_len, opcode);
			/* WR not freed here; go look at it with a debugger. */
		}
	}
}

/*
 * Ground control to Major TOM
 * Commencing countdown, engines on
 */
static int
t4_tom_activate(struct adapter *sc)
{
	struct tom_data *td;
	struct toedev *tod;
	struct vi_info *vi;
	struct sge_ofld_rxq *ofld_rxq;
	int i, j, rc, v;

	ASSERT_SYNCHRONIZED_OP(sc);

	/* per-adapter softc for TOM */
	td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT);
	if (td == NULL)
		return (ENOMEM);

	/* List of TOE PCBs and associated lock */
	mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
	TAILQ_INIT(&td->toep_list);

	/* Listen context */
	mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
	td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
	    &td->listen_mask, HASH_NOWAIT);

	/* List of WRs for which L2 resolution failed */
	mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF);
	STAILQ_INIT(&td->unsent_wr_list);
	TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td);

	/* TID tables */
	rc = alloc_tid_tabs(&sc->tids);
	if (rc != 0)
		goto done;

	rc = t4_init_ppod_region(&td->pr, &sc->vres.ddp,
	    t4_read_reg(sc, A_ULP_RX_TDDP_PSZ), "TDDP page pods");
	if (rc != 0)
		goto done;
	t4_set_reg_field(sc, A_ULP_RX_TDDP_TAGMASK,
	    V_TDDPTAGMASK(M_TDDPTAGMASK), td->pr.pr_tag_mask);

	/* CLIP table for IPv6 offload */
	init_clip_table(sc, td);

	if (sc->vres.key.size != 0) {
		rc = tls_init_kmap(sc, td);
		if (rc != 0)
			goto done;
	}

	/* toedev ops */
	tod = &td->tod;
	init_toedev(tod);
	tod->tod_softc = sc;
	tod->tod_connect = t4_connect;
	tod->tod_listen_start = t4_listen_start;
	tod->tod_listen_stop = t4_listen_stop;
	tod->tod_rcvd = t4_rcvd;
	tod->tod_output = t4_tod_output;
	tod->tod_send_rst = t4_send_rst;
	tod->tod_send_fin = t4_send_fin;
	tod->tod_pcb_detach = t4_pcb_detach;
	tod->tod_l2_update = t4_l2_update;
	tod->tod_syncache_added = t4_syncache_added;
	tod->tod_syncache_removed = t4_syncache_removed;
	tod->tod_syncache_respond = t4_syncache_respond;
	tod->tod_offload_socket = t4_offload_socket;
	tod->tod_ctloutput = t4_ctloutput;

	for_each_port(sc, i) {
		for_each_vi(sc->port[i], v, vi) {
			TOEDEV(vi->ifp) = &td->tod;
			for_each_ofld_rxq(vi, j, ofld_rxq) {
				ofld_rxq->iq.set_tcb_rpl = do_set_tcb_rpl;
				ofld_rxq->iq.l2t_write_rpl = do_l2t_write_rpl2;
			}
		}
	}

	sc->tom_softc = td;
	register_toedev(sc->tom_softc);

done:
	if (rc != 0)
		free_tom_data(sc, td);
	return (rc);
}
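/*
 * TOM can be deactivated only when it is completely idle: no port with
 * IFCAP_TOE enabled, no iWARP or iSCSI ULD active on the adapter, and no
 * remaining connections, listeners, or unsent work requests.
 */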
static int
t4_tom_deactivate(struct adapter *sc)
{
	int rc = 0;
	struct tom_data *td = sc->tom_softc;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (td == NULL)
		return (0);	/* XXX. KASSERT? */

	if (sc->offload_map != 0)
		return (EBUSY);	/* at least one port has IFCAP_TOE enabled */

	if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI))
		return (EBUSY);	/* both iWARP and iSCSI rely on the TOE. */

	mtx_lock(&td->toep_list_lock);
	if (!TAILQ_EMPTY(&td->toep_list))
		rc = EBUSY;
	mtx_unlock(&td->toep_list_lock);

	mtx_lock(&td->lctx_hash_lock);
	if (td->lctx_count > 0)
		rc = EBUSY;
	mtx_unlock(&td->lctx_hash_lock);

	taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
	mtx_lock(&td->unsent_wr_lock);
	if (!STAILQ_EMPTY(&td->unsent_wr_list))
		rc = EBUSY;
	mtx_unlock(&td->unsent_wr_lock);

	if (rc == 0) {
		unregister_toedev(sc->tom_softc);
		free_tom_data(sc, td);
		sc->tom_softc = NULL;
	}

	return (rc);
}

static void
t4_tom_ifaddr_event(void *arg __unused, struct ifnet *ifp)
{

	atomic_add_rel_int(&in6_ifaddr_gen, 1);
	taskqueue_enqueue_timeout(taskqueue_thread, &clip_task, -hz / 4);
}

static int
t4_aio_queue_tom(struct socket *so, struct kaiocb *job)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	int error;

	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		error = t4_aio_queue_ddp(so, job);
		if (error != EOPNOTSUPP)
			return (error);
	}

	return (t4_aio_queue_aiotx(so, job));
}

static int
t4_ctloutput_tom(struct socket *so, struct sockopt *sopt)
{

	if (sopt->sopt_level != IPPROTO_TCP)
		return (tcp_ctloutput(so, sopt));

	switch (sopt->sopt_name) {
	case TCP_TLSOM_SET_TLS_CONTEXT:
	case TCP_TLSOM_GET_TLS_TOM:
	case TCP_TLSOM_CLR_TLS_TOM:
	case TCP_TLSOM_CLR_QUIES:
		return (t4_ctloutput_tls(so, sopt));
	default:
		return (tcp_ctloutput(so, sopt));
	}
}

static int
t4_tom_mod_load(void)
{
	struct protosw *tcp_protosw, *tcp6_protosw;

	/* CPL handlers */
	t4_init_connect_cpl_handlers();
	t4_init_listen_cpl_handlers();
	t4_init_cpl_io_handlers();

	t4_ddp_mod_load();
	t4_tls_mod_load();

	tcp_protosw = pffindproto(PF_INET, IPPROTO_TCP, SOCK_STREAM);
	if (tcp_protosw == NULL)
		return (ENOPROTOOPT);
	bcopy(tcp_protosw, &toe_protosw, sizeof(toe_protosw));
	bcopy(tcp_protosw->pr_usrreqs, &toe_usrreqs, sizeof(toe_usrreqs));
	toe_usrreqs.pru_aio_queue = t4_aio_queue_tom;
	toe_protosw.pr_ctloutput = t4_ctloutput_tom;
	toe_protosw.pr_usrreqs = &toe_usrreqs;

	tcp6_protosw = pffindproto(PF_INET6, IPPROTO_TCP, SOCK_STREAM);
	if (tcp6_protosw == NULL)
		return (ENOPROTOOPT);
	bcopy(tcp6_protosw, &toe6_protosw, sizeof(toe6_protosw));
	bcopy(tcp6_protosw->pr_usrreqs, &toe6_usrreqs, sizeof(toe6_usrreqs));
	toe6_usrreqs.pru_aio_queue = t4_aio_queue_tom;
	toe6_protosw.pr_ctloutput = t4_ctloutput_tom;
	toe6_protosw.pr_usrreqs = &toe6_usrreqs;

	TIMEOUT_TASK_INIT(taskqueue_thread, &clip_task, 0, t4_clip_task, NULL);
	ifaddr_evhandler = EVENTHANDLER_REGISTER(ifaddr_event,
	    t4_tom_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);

	return (t4_register_uld(&tom_uld_info));
}
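/*
 * Module unload runs tom_uninit on every adapter to deactivate TOM wherever
 * it is idle; if any adapter still has TOM active, t4_unregister_uld fails
 * with EBUSY and the unload is aborted.
 */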
static void
tom_uninit(struct adapter *sc, void *arg __unused)
{
	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun"))
		return;

	/* Try to free resources (works only if no port has IFCAP_TOE) */
	if (uld_active(sc, ULD_TOM))
		t4_deactivate_uld(sc, ULD_TOM);

	end_synchronized_op(sc, 0);
}

static int
t4_tom_mod_unload(void)
{
	t4_iterate(tom_uninit, NULL);

	if (t4_unregister_uld(&tom_uld_info) == EBUSY)
		return (EBUSY);

	if (ifaddr_evhandler) {
		EVENTHANDLER_DEREGISTER(ifaddr_event, ifaddr_evhandler);
		taskqueue_cancel_timeout(taskqueue_thread, &clip_task, NULL);
	}

	t4_tls_mod_unload();
	t4_ddp_mod_unload();

	t4_uninit_connect_cpl_handlers();
	t4_uninit_listen_cpl_handlers();
	t4_uninit_cpl_io_handlers();

	return (0);
}
#endif	/* TCP_OFFLOAD */

static int
t4_tom_modevent(module_t mod, int cmd, void *arg)
{
	int rc = 0;

#ifdef TCP_OFFLOAD
	switch (cmd) {
	case MOD_LOAD:
		rc = t4_tom_mod_load();
		break;

	case MOD_UNLOAD:
		rc = t4_tom_mod_unload();
		break;

	default:
		rc = EINVAL;
	}
#else
	printf("t4_tom: compiled without TCP_OFFLOAD support.\n");
	rc = EOPNOTSUPP;
#endif
	return (rc);
}

static moduledata_t t4_tom_moddata = {
	"t4_tom",
	t4_tom_modevent,
	0
};

MODULE_VERSION(t4_tom, 1);
MODULE_DEPEND(t4_tom, toecore, 1, 1, 1);
MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1);
DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY);