/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/refcount.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/scope6_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

static struct protosw toe_protosw;
static struct pr_usrreqs toe_usrreqs;

static struct protosw toe6_protosw;
static struct pr_usrreqs toe6_usrreqs;

/* Module ops */
static int t4_tom_mod_load(void);
static int t4_tom_mod_unload(void);
static int t4_tom_modevent(module_t, int, void *);

/* ULD ops and helpers */
static int t4_tom_activate(struct adapter *);
static int t4_tom_deactivate(struct adapter *);

static struct uld_info tom_uld_info = {
	.uld_id = ULD_TOM,
	.activate = t4_tom_activate,
	.deactivate = t4_tom_deactivate,
};

static void queue_tid_release(struct adapter *, int);
static void release_offload_resources(struct toepcb *);
static int alloc_tid_tabs(struct tid_info *);
static void free_tid_tabs(struct tid_info *);
static int add_lip(struct adapter *, struct in6_addr *);
static int delete_lip(struct adapter *, struct in6_addr *);
static struct clip_entry *search_lip(struct tom_data *, struct in6_addr *);
static void init_clip_table(struct adapter *, struct tom_data *);
static void update_clip(struct adapter *, void *);
static void t4_clip_task(void *, int);
static void update_clip_table(struct adapter *, struct tom_data *);
static void destroy_clip_table(struct adapter *, struct tom_data *);
static void free_tom_data(struct adapter *, struct tom_data *);
static void reclaim_wr_resources(void *, int);

static int in6_ifaddr_gen;
static eventhandler_tag ifaddr_evhandler;
static struct timeout_task clip_task;

struct toepcb *
alloc_toepcb(struct vi_info *vi, int txqid, int rxqid, int flags)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct toepcb *toep;
	int tx_credits, txsd_total, len;

	/*
	 * The firmware counts tx work request credits in units of 16 bytes
	 * each.  Reserve room for an ABORT_REQ so the driver never has to
	 * worry about tx credits if it wants to abort a connection.
	 */
	tx_credits = sc->params.ofldq_wr_cred;
	tx_credits -= howmany(sizeof(struct cpl_abort_req), 16);

	/*
	 * Shortest possible tx work request is a fw_ofld_tx_data_wr + 1 byte
	 * of immediate payload, and the firmware counts tx work request
	 * credits in units of 16 bytes.  Calculate the maximum number of work
	 * requests possible.
	 */
	txsd_total = tx_credits /
	    howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16);

	if (txqid < 0)
		txqid = (arc4random() % vi->nofldtxq) + vi->first_ofld_txq;
	KASSERT(txqid >= vi->first_ofld_txq &&
	    txqid < vi->first_ofld_txq + vi->nofldtxq,
	    ("%s: txqid %d for vi %p (first %d, n %d)", __func__, txqid, vi,
	    vi->first_ofld_txq, vi->nofldtxq));

	if (rxqid < 0)
		rxqid = (arc4random() % vi->nofldrxq) + vi->first_ofld_rxq;
	KASSERT(rxqid >= vi->first_ofld_rxq &&
	    rxqid < vi->first_ofld_rxq + vi->nofldrxq,
	    ("%s: rxqid %d for vi %p (first %d, n %d)", __func__, rxqid, vi,
	    vi->first_ofld_rxq, vi->nofldrxq));

	len = offsetof(struct toepcb, txsd) +
	    txsd_total * sizeof(struct ofld_tx_sdesc);

	toep = malloc(len, M_CXGBE, M_ZERO | flags);
	if (toep == NULL)
		return (NULL);

	refcount_init(&toep->refcount, 1);
	toep->td = sc->tom_softc;
	toep->vi = vi;
	toep->tc_idx = -1;
	toep->tx_total = tx_credits;
	toep->tx_credits = tx_credits;
	toep->ofld_txq = &sc->sge.ofld_txq[txqid];
	toep->ofld_rxq = &sc->sge.ofld_rxq[rxqid];
	toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
	mbufq_init(&toep->ulp_pduq, INT_MAX);
	mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX);
	toep->txsd_total = txsd_total;
	toep->txsd_avail = txsd_total;
	toep->txsd_pidx = 0;
	toep->txsd_cidx = 0;
	aiotx_init_toep(toep);
	ddp_init_toep(toep);

	return (toep);
}

struct toepcb *
hold_toepcb(struct toepcb *toep)
{

	refcount_acquire(&toep->refcount);
	return (toep);
}

void
free_toepcb(struct toepcb *toep)
{

	if (refcount_release(&toep->refcount) == 0)
		return;

	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: attached to an inpcb", __func__));
	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: CPL pending", __func__));

	ddp_uninit_toep(toep);
	free(toep, M_CXGBE);
}

/*
 * Set up the socket for TCP offload.
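 * This swaps in the TOE protocol switch, marks both socket buffers
 * SB_NOCOALESCE, points the tcpcb at the TOE device and the toepcb (TF_TOE),
 * and takes an extra reference on the inpcb that is held for the life of the
 * offloaded connection.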
 */
void
offload_socket(struct socket *so, struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	/* Update socket */
	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	if (inp->inp_vflag & INP_IPV6)
		so->so_proto = &toe6_protosw;
	else
		so->so_proto = &toe_protosw;
	SOCKBUF_UNLOCK(sb);

	/* Update TCP PCB */
	tp->tod = &td->tod;
	tp->t_toe = toep;
	tp->t_flags |= TF_TOE;

	/* Install an extra hold on inp */
	toep->inp = inp;
	toep->flags |= TPF_ATTACHED;
	in_pcbref(inp);

	/* Add the TOE PCB to the active list */
	mtx_lock(&td->toep_list_lock);
	TAILQ_INSERT_HEAD(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

/* This is _not_ the normal way to "unoffload" a socket. */
void
undo_offload_socket(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;
	struct tom_data *td = toep->td;
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);

	tp->tod = NULL;
	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;

	toep->inp = NULL;
	toep->flags &= ~TPF_ATTACHED;
	if (in_pcbrele_wlocked(inp))
		panic("%s: inp freed.", __func__);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

static void
release_offload_resources(struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct adapter *sc = td_adapter(td);
	int tid = toep->tid;

	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: %p has CPL pending.", __func__, toep));
	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: %p is still attached.", __func__, toep));

	CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)",
	    __func__, toep, tid, toep->l2te, toep->ce);

	/*
	 * These queues should have been emptied at approximately the same time
	 * that a normal connection's socket's so_snd would have been purged or
	 * drained.  Do _not_ clean up here.
	 */
	MPASS(mbufq_len(&toep->ulp_pduq) == 0);
	MPASS(mbufq_len(&toep->ulp_pdu_reclaimq) == 0);
#ifdef INVARIANTS
	ddp_assert_empty(toep);
#endif

	if (toep->l2te)
		t4_l2t_release(toep->l2te);

	if (tid >= 0) {
		remove_tid(sc, tid, toep->ce ? 2 : 1);
		release_tid(sc, tid, toep->ctrlq);
	}

	if (toep->ce)
		release_lip(td, toep->ce);

#ifdef RATELIMIT
	if (toep->tc_idx != -1)
		t4_release_cl_rl_kbps(sc, toep->vi->pi->port_id, toep->tc_idx);
#endif
	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);

	free_toepcb(toep);
}

/*
 * The kernel is done with the TCP PCB and this is our opportunity to unhook
 * the toepcb hanging off of it.  If the TOE driver is also done with the
 * toepcb (no pending CPL) then it is time to release all resources tied to
 * the toepcb.
 *
 * Also gets called when an offloaded active open fails and the TOM wants the
 * kernel to take the TCP PCB back.
 */
static void
t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
{
#if defined(KTR) || defined(INVARIANTS)
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);

	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
	KASSERT(toep->flags & TPF_ATTACHED,
	    ("%s: not attached", __func__));

#ifdef KTR
	if (tp->t_state == TCPS_SYN_SENT) {
		CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)",
		    __func__, toep->tid, toep, toep->flags, inp,
		    inp->inp_flags);
	} else {
		CTR6(KTR_CXGBE,
		    "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)",
		    toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp,
		    inp->inp_flags);
	}
#endif

	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;
	toep->flags &= ~TPF_ATTACHED;

	if (!(toep->flags & TPF_CPL_PENDING))
		release_offload_resources(toep);
}

/*
 * setsockopt handler.
 */
static void
t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = tp->t_toe;

	if (dir == SOPT_GET)
		return;

	CTR4(KTR_CXGBE, "%s: tp %p, dir %u, name %u", __func__, tp, dir, name);

	switch (name) {
	case TCP_NODELAY:
		if (tp->t_state != TCPS_ESTABLISHED)
			break;
		t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_T_FLAGS,
		    V_TF_NAGLE(1), V_TF_NAGLE(tp->t_flags & TF_NODELAY ? 0 : 1),
		    0, 0, toep->ofld_rxq->iq.abs_id);
		break;
	default:
		break;
	}
}

/*
 * The TOE driver will not receive any more CPLs for the tid associated with
 * the toepcb; release the hold on the inpcb.
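 * If the kernel has already detached from the connection (TPF_ATTACHED is
 * clear) then this is also where all offload resources are released;
 * otherwise that happens later, in t4_pcb_detach().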
 */
void
final_cpl_received(struct toepcb *toep)
{
	struct inpcb *inp = toep->inp;

	KASSERT(inp != NULL, ("%s: inp is NULL", __func__));
	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_CPL_PENDING,
	    ("%s: CPL not pending already?", __func__));

	CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)",
	    __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags);

	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		release_ddp_resources(toep);
	toep->inp = NULL;
	toep->flags &= ~TPF_CPL_PENDING;
	mbufq_drain(&toep->ulp_pdu_reclaimq);

	if (!(toep->flags & TPF_ATTACHED))
		release_offload_resources(toep);

	if (!in_pcbrele_wlocked(inp))
		INP_WUNLOCK(inp);
}

void
insert_tid(struct adapter *sc, int tid, void *ctx, int ntids)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid] = ctx;
	atomic_add_int(&t->tids_in_use, ntids);
}

void *
lookup_tid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;

	return (t->tid_tab[tid]);
}

void
update_tid(struct adapter *sc, int tid, void *ctx)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid] = ctx;
}

void
remove_tid(struct adapter *sc, int tid, int ntids)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid] = NULL;
	atomic_subtract_int(&t->tids_in_use, ntids);
}

void
release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
{
	struct wrqe *wr;
	struct cpl_tid_release *req;

	wr = alloc_wrqe(sizeof(*req), ctrlq);
	if (wr == NULL) {
		queue_tid_release(sc, tid);	/* defer */
		return;
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);

	t4_wrq_tx(sc, wr);
}

static void
queue_tid_release(struct adapter *sc, int tid)
{

	CXGBE_UNIMPLEMENTED("deferred tid release");
}

/*
 * What mtu_idx to use, given a 4-tuple and/or an MSS cap.
 */
int
find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc, int pmss)
{
	unsigned short *mtus = &sc->params.mtus[0];
	int i, mss, n;

	KASSERT(inc != NULL || pmss > 0,
	    ("%s: at least one of inc/pmss must be specified", __func__));

	mss = inc ? tcp_mssopt(inc) : pmss;
	if (pmss > 0 && mss > pmss)
		mss = pmss;

	/* inc may be NULL (see the KASSERT above); assume IPv4 headers then. */
	if (inc != NULL && (inc->inc_flags & INC_ISIPV6))
		n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		n = sizeof(struct ip) + sizeof(struct tcphdr);

	for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mss + n; i++)
		continue;

	return (i);
}

/*
 * Determine the receive window size for a socket.
 */
u_long
select_rcv_wnd(struct socket *so)
{
	unsigned long wnd;

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	wnd = sbspace(&so->so_rcv);
	if (wnd < MIN_RCV_WND)
		wnd = MIN_RCV_WND;

	return min(wnd, MAX_RCV_WND);
}

int
select_rcv_wscale(void)
{
	int wscale = 0;
	unsigned long space = sb_max;

	if (space > MAX_RCV_WND)
		space = MAX_RCV_WND;

	while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space)
		wscale++;

	return (wscale);
}

/*
 * socket so could be a listening socket too.
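 * Any of so, vi, and e may be NULL, in which case the corresponding fields
 * are simply left out of opt0.  The result is returned in big-endian (wire)
 * order.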
 */
uint64_t
calc_opt0(struct socket *so, struct vi_info *vi, struct l2t_entry *e,
    int mtu_idx, int rscale, int rx_credits, int ulp_mode)
{
	uint64_t opt0;

	KASSERT(rx_credits <= M_RCV_BUFSIZ,
	    ("%s: rcv_bufsiz too high", __func__));

	opt0 = F_TCAM_BYPASS | V_WND_SCALE(rscale) | V_MSS_IDX(mtu_idx) |
	    V_ULP_MODE(ulp_mode) | V_RCV_BUFSIZ(rx_credits);

	if (so != NULL) {
		struct inpcb *inp = sotoinpcb(so);
		struct tcpcb *tp = intotcpcb(inp);
		int keepalive = tcp_always_keepalive ||
		    so_options_get(so) & SO_KEEPALIVE;

		opt0 |= V_NAGLE((tp->t_flags & TF_NODELAY) == 0);
		opt0 |= V_KEEP_ALIVE(keepalive != 0);
	}

	if (e != NULL)
		opt0 |= V_L2T_IDX(e->idx);

	if (vi != NULL) {
		opt0 |= V_SMAC_SEL(vi->smt_idx);
		opt0 |= V_TX_CHAN(vi->pi->tx_chan);
	}

	return htobe64(opt0);
}

uint64_t
select_ntuple(struct vi_info *vi, struct l2t_entry *e)
{
	struct adapter *sc = vi->pi->adapter;
	struct tp_params *tp = &sc->params.tp;
	uint16_t viid = vi->viid;
	uint64_t ntuple = 0;

	/*
	 * Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && e->vlan != CPL_L2T_VLAN_NONE)
		ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;

	if (tp->port_shift >= 0)
		ntuple |= (uint64_t)e->lport << tp->port_shift;

	if (tp->protocol_shift >= 0)
		ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;

	if (tp->vnic_shift >= 0) {
		uint32_t vf = G_FW_VIID_VIN(viid);
		uint32_t pf = G_FW_VIID_PFN(viid);
		uint32_t vld = G_FW_VIID_VIVLD(viid);

		ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vf) | V_FT_VNID_ID_PF(pf) |
		    V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
	}

	if (is_t4(sc))
		return (htobe32((uint32_t)ntuple));
	else
		return (htobe64(V_FILTER_TUPLE(ntuple)));
}

void
set_tcpddp_ulp_mode(struct toepcb *toep)
{

	toep->ulp_mode = ULP_MODE_TCPDDP;
	toep->ddp_flags = DDP_OK;
}

int
negative_advice(int status)
{

	return (status == CPL_ERR_RTX_NEG_ADVICE ||
	    status == CPL_ERR_PERSIST_NEG_ADVICE ||
	    status == CPL_ERR_KEEPALV_NEG_ADVICE);
}

static int
alloc_tid_tabs(struct tid_info *t)
{
	size_t size;
	unsigned int i;

	size = t->ntids * sizeof(*t->tid_tab) +
	    t->natids * sizeof(*t->atid_tab) +
	    t->nstids * sizeof(*t->stid_tab);

	t->tid_tab = malloc(size, M_CXGBE, M_ZERO | M_NOWAIT);
	if (t->tid_tab == NULL)
		return (ENOMEM);

	mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->afree = t->atid_tab;
	t->atids_in_use = 0;
	for (i = 1; i < t->natids; i++)
		t->atid_tab[i - 1].next = &t->atid_tab[i];
	t->atid_tab[t->natids - 1].next = NULL;

	mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF);
	t->stid_tab = (struct listen_ctx **)&t->atid_tab[t->natids];
	t->stids_in_use = 0;
	TAILQ_INIT(&t->stids);
	t->nstids_free_head = t->nstids;

	atomic_store_rel_int(&t->tids_in_use, 0);

	return (0);
}

static void
free_tid_tabs(struct tid_info *t)
{
	KASSERT(t->tids_in_use == 0,
	    ("%s: %d tids still in use.", __func__, t->tids_in_use));
	KASSERT(t->atids_in_use == 0,
	    ("%s: %d atids still in use.", __func__, t->atids_in_use));
	KASSERT(t->stids_in_use == 0,
	    ("%s: %d stids still in use.", __func__, t->stids_in_use));

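	/*
	 * All three tables live in the single contiguous allocation made in
	 * alloc_tid_tabs(), so freeing tid_tab releases the atid and stid
	 * tables as well.
	 */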
	free(t->tid_tab, M_CXGBE);
	t->tid_tab = NULL;

	if (mtx_initialized(&t->atid_lock))
		mtx_destroy(&t->atid_lock);
	if (mtx_initialized(&t->stid_lock))
		mtx_destroy(&t->stid_lock);
}

static int
add_lip(struct adapter *sc, struct in6_addr *lip)
{
	struct fw_clip_cmd c;

	ASSERT_SYNCHRONIZED_OP(sc);
	/* mtx_assert(&td->clip_table_lock, MA_OWNED); */

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(V_FW_CMD_OP(FW_CLIP_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE);
	c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
	c.ip_hi = *(uint64_t *)&lip->s6_addr[0];
	c.ip_lo = *(uint64_t *)&lip->s6_addr[8];

	return (-t4_wr_mbox_ns(sc, sc->mbox, &c, sizeof(c), &c));
}

static int
delete_lip(struct adapter *sc, struct in6_addr *lip)
{
	struct fw_clip_cmd c;

	ASSERT_SYNCHRONIZED_OP(sc);
	/* mtx_assert(&td->clip_table_lock, MA_OWNED); */

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(V_FW_CMD_OP(FW_CLIP_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_READ);
	c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
	c.ip_hi = *(uint64_t *)&lip->s6_addr[0];
	c.ip_lo = *(uint64_t *)&lip->s6_addr[8];

	return (-t4_wr_mbox_ns(sc, sc->mbox, &c, sizeof(c), &c));
}

static struct clip_entry *
search_lip(struct tom_data *td, struct in6_addr *lip)
{
	struct clip_entry *ce;

	mtx_assert(&td->clip_table_lock, MA_OWNED);

	TAILQ_FOREACH(ce, &td->clip_table, link) {
		if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip))
			return (ce);
	}

	return (NULL);
}

struct clip_entry *
hold_lip(struct tom_data *td, struct in6_addr *lip, struct clip_entry *ce)
{

	mtx_lock(&td->clip_table_lock);
	if (ce == NULL)
		ce = search_lip(td, lip);
	if (ce != NULL)
		ce->refcount++;
	mtx_unlock(&td->clip_table_lock);

	return (ce);
}

void
release_lip(struct tom_data *td, struct clip_entry *ce)
{

	mtx_lock(&td->clip_table_lock);
	KASSERT(search_lip(td, &ce->lip) == ce,
	    ("%s: CLIP entry %p not in CLIP table.", __func__, ce));
	KASSERT(ce->refcount > 0,
	    ("%s: CLIP entry %p has refcount 0", __func__, ce));
	--ce->refcount;
	mtx_unlock(&td->clip_table_lock);
}

static void
init_clip_table(struct adapter *sc, struct tom_data *td)
{

	ASSERT_SYNCHRONIZED_OP(sc);

	mtx_init(&td->clip_table_lock, "CLIP table lock", NULL, MTX_DEF);
	TAILQ_INIT(&td->clip_table);
	td->clip_gen = -1;

	update_clip_table(sc, td);
}

static void
update_clip(struct adapter *sc, void *arg __unused)
{

	if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4tomuc"))
		return;

	if (uld_active(sc, ULD_TOM))
		update_clip_table(sc, sc->tom_softc);

	end_synchronized_op(sc, LOCK_HELD);
}

static void
t4_clip_task(void *arg, int count)
{

	t4_iterate(update_clip, NULL);
}

static void
update_clip_table(struct adapter *sc, struct tom_data *td)
{
	struct rm_priotracker in6_ifa_tracker;
	struct in6_ifaddr *ia;
	struct in6_addr *lip, tlip;
	struct clip_head stale;
	struct clip_entry *ce, *ce_temp;
	struct vi_info *vi;
	int rc, gen, i, j;
	uintptr_t last_vnet;

	ASSERT_SYNCHRONIZED_OP(sc);

	IN6_IFADDR_RLOCK(&in6_ifa_tracker);
	mtx_lock(&td->clip_table_lock);

	gen = atomic_load_acq_int(&in6_ifaddr_gen);
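	/* Nothing to do if the address list hasn't changed since last scan. */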
	if (gen == td->clip_gen)
		goto done;

	TAILQ_INIT(&stale);
	TAILQ_CONCAT(&stale, &td->clip_table, link);

	/*
	 * last_vnet optimizes the common cases where all if_vnet = NULL (no
	 * VIMAGE) or all if_vnet = vnet0.
	 */
	last_vnet = (uintptr_t)(-1);
	for_each_port(sc, i)
	for_each_vi(sc->port[i], j, vi) {
		if (last_vnet == (uintptr_t)vi->ifp->if_vnet)
			continue;

		/* XXX: races with if_vmove */
		CURVNET_SET(vi->ifp->if_vnet);
		TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
			lip = &ia->ia_addr.sin6_addr;

			KASSERT(!IN6_IS_ADDR_MULTICAST(lip),
			    ("%s: mcast address in in6_ifaddr list", __func__));

			if (IN6_IS_ADDR_LOOPBACK(lip))
				continue;
			if (IN6_IS_SCOPE_EMBED(lip)) {
				/* Remove the embedded scope */
				tlip = *lip;
				lip = &tlip;
				in6_clearscope(lip);
			}
			/*
			 * XXX: how to weed out the link local address for the
			 * loopback interface?  It's fe80::1 usually (always?).
			 */

			/*
			 * If it's in the main list then we already know it's
			 * not stale.
			 */
			TAILQ_FOREACH(ce, &td->clip_table, link) {
				if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip))
					goto next;
			}

			/*
			 * If it's in the stale list we should move it to the
			 * main list.
			 */
			TAILQ_FOREACH(ce, &stale, link) {
				if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip)) {
					TAILQ_REMOVE(&stale, ce, link);
					TAILQ_INSERT_TAIL(&td->clip_table, ce,
					    link);
					goto next;
				}
			}

			/* A new IP6 address; add it to the CLIP table. */
			ce = malloc(sizeof(*ce), M_CXGBE, M_NOWAIT);
			if (ce == NULL)
				continue;	/* skip on allocation failure */
			memcpy(&ce->lip, lip, sizeof(ce->lip));
			ce->refcount = 0;
			rc = add_lip(sc, lip);
			if (rc == 0)
				TAILQ_INSERT_TAIL(&td->clip_table, ce, link);
			else {
				char ip[INET6_ADDRSTRLEN];

				inet_ntop(AF_INET6, &ce->lip, &ip[0],
				    sizeof(ip));
				log(LOG_ERR, "%s: could not add %s (%d)\n",
				    __func__, ip, rc);
				free(ce, M_CXGBE);
			}
next:
			continue;
		}
		CURVNET_RESTORE();
		last_vnet = (uintptr_t)vi->ifp->if_vnet;
	}

	/*
	 * Remove stale addresses (those no longer in V_in6_ifaddrhead) that
	 * are no longer referenced by the driver.
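	 * Stale entries that are still referenced (refcount > 0) cannot be
	 * removed from the hardware yet; they are moved back into the main
	 * table below.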
	 */
	TAILQ_FOREACH_SAFE(ce, &stale, link, ce_temp) {
		if (ce->refcount == 0) {
			rc = delete_lip(sc, &ce->lip);
			if (rc == 0) {
				TAILQ_REMOVE(&stale, ce, link);
				free(ce, M_CXGBE);
			} else {
				char ip[INET6_ADDRSTRLEN];

				inet_ntop(AF_INET6, &ce->lip, &ip[0],
				    sizeof(ip));
				log(LOG_ERR, "%s: could not delete %s (%d)\n",
				    __func__, ip, rc);
			}
		}
	}
	/* The ones that are still referenced need to stay in the CLIP table */
	TAILQ_CONCAT(&td->clip_table, &stale, link);

	td->clip_gen = gen;
done:
	mtx_unlock(&td->clip_table_lock);
	IN6_IFADDR_RUNLOCK(&in6_ifa_tracker);
}

static void
destroy_clip_table(struct adapter *sc, struct tom_data *td)
{
	struct clip_entry *ce, *ce_temp;

	if (mtx_initialized(&td->clip_table_lock)) {
		mtx_lock(&td->clip_table_lock);
		TAILQ_FOREACH_SAFE(ce, &td->clip_table, link, ce_temp) {
			KASSERT(ce->refcount == 0,
			    ("%s: CLIP entry %p still in use (%d)", __func__,
			    ce, ce->refcount));
			TAILQ_REMOVE(&td->clip_table, ce, link);
			delete_lip(sc, &ce->lip);
			free(ce, M_CXGBE);
		}
		mtx_unlock(&td->clip_table_lock);
		mtx_destroy(&td->clip_table_lock);
	}
}

static void
free_tom_data(struct adapter *sc, struct tom_data *td)
{

	ASSERT_SYNCHRONIZED_OP(sc);

	KASSERT(TAILQ_EMPTY(&td->toep_list),
	    ("%s: TOE PCB list is not empty.", __func__));
	KASSERT(td->lctx_count == 0,
	    ("%s: lctx hash table is not empty.", __func__));

	t4_free_ppod_region(&td->pr);
	destroy_clip_table(sc, td);

	if (td->listen_mask != 0)
		hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);

	if (mtx_initialized(&td->unsent_wr_lock))
		mtx_destroy(&td->unsent_wr_lock);
	if (mtx_initialized(&td->lctx_hash_lock))
		mtx_destroy(&td->lctx_hash_lock);
	if (mtx_initialized(&td->toep_list_lock))
		mtx_destroy(&td->toep_list_lock);

	free_tid_tabs(&sc->tids);
	free(td, M_CXGBE);
}

static void
reclaim_wr_resources(void *arg, int count)
{
	struct tom_data *td = arg;
	STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list);
	struct cpl_act_open_req *cpl;
	u_int opcode, atid;
	struct wrqe *wr;
	struct adapter *sc;

	mtx_lock(&td->unsent_wr_lock);
	STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
	mtx_unlock(&td->unsent_wr_lock);

	while ((wr = STAILQ_FIRST(&twr_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&twr_list, link);

		cpl = wrtod(wr);
		opcode = GET_OPCODE(cpl);

		switch (opcode) {
		case CPL_ACT_OPEN_REQ:
		case CPL_ACT_OPEN_REQ6:
			atid = G_TID_TID(be32toh(OPCODE_TID(cpl)));
			sc = td_adapter(td);

			CTR2(KTR_CXGBE, "%s: atid %u", __func__, atid);
			act_open_failure_cleanup(sc, atid, EHOSTUNREACH);
			free(wr, M_CXGBE);
			break;
		default:
			log(LOG_ERR, "%s: leaked work request %p, wr_len %d, "
			    "opcode %x\n", __func__, wr, wr->wr_len, opcode);
			/* WR not freed here; go look at it with a debugger. */
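			/*
			 * Only failed active opens are expected on this list;
			 * any other WR is leaked deliberately so it remains
			 * available for inspection.
			 */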
		}
	}
}

/*
 * Ground control to Major TOM
 * Commencing countdown, engines on
 */
static int
t4_tom_activate(struct adapter *sc)
{
	struct tom_data *td;
	struct toedev *tod;
	struct vi_info *vi;
	struct sge_ofld_rxq *ofld_rxq;
	int i, j, rc, v;

	ASSERT_SYNCHRONIZED_OP(sc);

	/* per-adapter softc for TOM */
	td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT);
	if (td == NULL)
		return (ENOMEM);

	/* List of TOE PCBs and associated lock */
	mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
	TAILQ_INIT(&td->toep_list);

	/* Listen context */
	mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
	td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
	    &td->listen_mask, HASH_NOWAIT);

	/* List of WRs for which L2 resolution failed */
	mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF);
	STAILQ_INIT(&td->unsent_wr_list);
	TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td);

	/* TID tables */
	rc = alloc_tid_tabs(&sc->tids);
	if (rc != 0)
		goto done;

	rc = t4_init_ppod_region(&td->pr, &sc->vres.ddp,
	    t4_read_reg(sc, A_ULP_RX_TDDP_PSZ), "TDDP page pods");
	if (rc != 0)
		goto done;
	t4_set_reg_field(sc, A_ULP_RX_TDDP_TAGMASK,
	    V_TDDPTAGMASK(M_TDDPTAGMASK), td->pr.pr_tag_mask);

	/* CLIP table for IPv6 offload */
	init_clip_table(sc, td);

	/* toedev ops */
	tod = &td->tod;
	init_toedev(tod);
	tod->tod_softc = sc;
	tod->tod_connect = t4_connect;
	tod->tod_listen_start = t4_listen_start;
	tod->tod_listen_stop = t4_listen_stop;
	tod->tod_rcvd = t4_rcvd;
	tod->tod_output = t4_tod_output;
	tod->tod_send_rst = t4_send_rst;
	tod->tod_send_fin = t4_send_fin;
	tod->tod_pcb_detach = t4_pcb_detach;
	tod->tod_l2_update = t4_l2_update;
	tod->tod_syncache_added = t4_syncache_added;
	tod->tod_syncache_removed = t4_syncache_removed;
	tod->tod_syncache_respond = t4_syncache_respond;
	tod->tod_offload_socket = t4_offload_socket;
	tod->tod_ctloutput = t4_ctloutput;

	for_each_port(sc, i) {
		for_each_vi(sc->port[i], v, vi) {
			TOEDEV(vi->ifp) = &td->tod;
			for_each_ofld_rxq(vi, j, ofld_rxq) {
				ofld_rxq->iq.set_tcb_rpl = do_set_tcb_rpl;
				ofld_rxq->iq.l2t_write_rpl = do_l2t_write_rpl2;
			}
		}
	}

	sc->tom_softc = td;
	register_toedev(sc->tom_softc);

done:
	if (rc != 0)
		free_tom_data(sc, td);
	return (rc);
}

static int
t4_tom_deactivate(struct adapter *sc)
{
	int rc = 0;
	struct tom_data *td = sc->tom_softc;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (td == NULL)
		return (0);	/* XXX. KASSERT? */

	if (sc->offload_map != 0)
		return (EBUSY);	/* at least one port has IFCAP_TOE enabled */

	if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI))
		return (EBUSY);	/* both iWARP and iSCSI rely on the TOE. */

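	/*
	 * Remaining-user checks: live connections, live listeners, and
	 * unsent work requests each turn this into an EBUSY.
	 */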
	mtx_lock(&td->toep_list_lock);
	if (!TAILQ_EMPTY(&td->toep_list))
		rc = EBUSY;
	mtx_unlock(&td->toep_list_lock);

	mtx_lock(&td->lctx_hash_lock);
	if (td->lctx_count > 0)
		rc = EBUSY;
	mtx_unlock(&td->lctx_hash_lock);

	taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
	mtx_lock(&td->unsent_wr_lock);
	if (!STAILQ_EMPTY(&td->unsent_wr_list))
		rc = EBUSY;
	mtx_unlock(&td->unsent_wr_lock);

	if (rc == 0) {
		unregister_toedev(sc->tom_softc);
		free_tom_data(sc, td);
		sc->tom_softc = NULL;
	}

	return (rc);
}

static void
t4_tom_ifaddr_event(void *arg __unused, struct ifnet *ifp)
{

	atomic_add_rel_int(&in6_ifaddr_gen, 1);
	taskqueue_enqueue_timeout(taskqueue_thread, &clip_task, -hz / 4);
}

static int
t4_aio_queue_tom(struct socket *so, struct kaiocb *job)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	int error;

	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		error = t4_aio_queue_ddp(so, job);
		if (error != EOPNOTSUPP)
			return (error);
	}

	return (t4_aio_queue_aiotx(so, job));
}

static int
t4_tom_mod_load(void)
{
	int rc;
	struct protosw *tcp_protosw, *tcp6_protosw;

	/* CPL handlers */
	t4_init_connect_cpl_handlers();
	t4_init_listen_cpl_handlers();
	t4_init_cpl_io_handlers();

	rc = t4_ddp_mod_load();
	if (rc != 0)
		return (rc);

	tcp_protosw = pffindproto(PF_INET, IPPROTO_TCP, SOCK_STREAM);
	if (tcp_protosw == NULL)
		return (ENOPROTOOPT);
	bcopy(tcp_protosw, &toe_protosw, sizeof(toe_protosw));
	bcopy(tcp_protosw->pr_usrreqs, &toe_usrreqs, sizeof(toe_usrreqs));
	toe_usrreqs.pru_aio_queue = t4_aio_queue_tom;
	toe_protosw.pr_usrreqs = &toe_usrreqs;

	tcp6_protosw = pffindproto(PF_INET6, IPPROTO_TCP, SOCK_STREAM);
	if (tcp6_protosw == NULL)
		return (ENOPROTOOPT);
	bcopy(tcp6_protosw, &toe6_protosw, sizeof(toe6_protosw));
	bcopy(tcp6_protosw->pr_usrreqs, &toe6_usrreqs, sizeof(toe6_usrreqs));
	toe6_usrreqs.pru_aio_queue = t4_aio_queue_tom;
	toe6_protosw.pr_usrreqs = &toe6_usrreqs;

	TIMEOUT_TASK_INIT(taskqueue_thread, &clip_task, 0, t4_clip_task, NULL);
	ifaddr_evhandler = EVENTHANDLER_REGISTER(ifaddr_event,
	    t4_tom_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);

	rc = t4_register_uld(&tom_uld_info);
	if (rc != 0)
		t4_tom_mod_unload();

	return (rc);
}

static void
tom_uninit(struct adapter *sc, void *arg __unused)
{
	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun"))
		return;

	/* Try to free resources (works only if no port has IFCAP_TOE) */
	if (uld_active(sc, ULD_TOM))
		t4_deactivate_uld(sc, ULD_TOM);

	end_synchronized_op(sc, 0);
}

static int
t4_tom_mod_unload(void)
{
	t4_iterate(tom_uninit, NULL);

	if (t4_unregister_uld(&tom_uld_info) == EBUSY)
		return (EBUSY);

	if (ifaddr_evhandler) {
		EVENTHANDLER_DEREGISTER(ifaddr_event, ifaddr_evhandler);
		taskqueue_cancel_timeout(taskqueue_thread, &clip_task, NULL);
	}

	t4_ddp_mod_unload();

	t4_uninit_connect_cpl_handlers();
	t4_uninit_listen_cpl_handlers();
	t4_uninit_cpl_io_handlers();

	return (0);
}
#endif	/* TCP_OFFLOAD */

static int
t4_tom_modevent(module_t mod, int cmd, void *arg)
{
	int rc = 0;

#ifdef TCP_OFFLOAD
	switch (cmd) {
	case MOD_LOAD:
		rc = t4_tom_mod_load();
		break;

	case MOD_UNLOAD:
		rc = t4_tom_mod_unload();
		break;

	default:
		rc = EINVAL;
	}
#else
	printf("t4_tom: compiled without TCP_OFFLOAD support.\n");
	rc = EOPNOTSUPP;
#endif
	return (rc);
}

static moduledata_t t4_tom_moddata = {
	"t4_tom",
	t4_tom_modevent,
	0
};

MODULE_VERSION(t4_tom, 1);
MODULE_DEPEND(t4_tom, toecore, 1, 1, 1);
MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1);
DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY);
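
/*
 * Typical usage, sketched under the assumption of a cxgbe(4) port named
 * cxl0 (interface names vary by adapter): load this module with
 * "kldload t4_tom" and enable TOE on the port with "ifconfig cxl0 toe".
 * Enabling IFCAP_TOE is what causes the ULD activate callback above to run
 * for that adapter.
 */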