xref: /freebsd/sys/dev/cxgbe/tom/t4_tom.c (revision 8ef24a0d4b28fe230e20637f56869cc4148cd2ca)
1 /*-
2  * Copyright (c) 2012 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 
34 #include <sys/param.h>
35 #include <sys/types.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/ktr.h>
39 #include <sys/lock.h>
40 #include <sys/limits.h>
41 #include <sys/module.h>
42 #include <sys/protosw.h>
43 #include <sys/domain.h>
44 #include <sys/refcount.h>
45 #include <sys/rmlock.h>
46 #include <sys/socket.h>
47 #include <sys/socketvar.h>
48 #include <sys/taskqueue.h>
49 #include <net/if.h>
50 #include <net/if_var.h>
51 #include <netinet/in.h>
52 #include <netinet/in_pcb.h>
53 #include <netinet/in_var.h>
54 #include <netinet/ip.h>
55 #include <netinet/ip6.h>
56 #include <netinet6/scope6_var.h>
57 #define TCPSTATES
58 #include <netinet/tcp_fsm.h>
59 #include <netinet/tcp_var.h>
60 #include <netinet/toecore.h>
61 
62 #ifdef TCP_OFFLOAD
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "common/t4_tcb.h"
68 #include "tom/t4_tom_l2t.h"
69 #include "tom/t4_tom.h"
70 
71 static struct protosw ddp_protosw;
72 static struct pr_usrreqs ddp_usrreqs;
73 
74 static struct protosw ddp6_protosw;
75 static struct pr_usrreqs ddp6_usrreqs;
76 
77 /* Module ops */
78 static int t4_tom_mod_load(void);
79 static int t4_tom_mod_unload(void);
80 static int t4_tom_modevent(module_t, int, void *);
81 
82 /* ULD ops and helpers */
83 static int t4_tom_activate(struct adapter *);
84 static int t4_tom_deactivate(struct adapter *);
85 
86 static struct uld_info tom_uld_info = {
87 	.uld_id = ULD_TOM,
88 	.activate = t4_tom_activate,
89 	.deactivate = t4_tom_deactivate,
90 };
91 
92 static void queue_tid_release(struct adapter *, int);
93 static void release_offload_resources(struct toepcb *);
94 static int alloc_tid_tabs(struct tid_info *);
95 static void free_tid_tabs(struct tid_info *);
96 static int add_lip(struct adapter *, struct in6_addr *);
97 static int delete_lip(struct adapter *, struct in6_addr *);
98 static struct clip_entry *search_lip(struct tom_data *, struct in6_addr *);
99 static void init_clip_table(struct adapter *, struct tom_data *);
100 static void update_clip(struct adapter *, void *);
101 static void t4_clip_task(void *, int);
102 static void update_clip_table(struct adapter *, struct tom_data *);
103 static void destroy_clip_table(struct adapter *, struct tom_data *);
104 static void free_tom_data(struct adapter *, struct tom_data *);
105 static void reclaim_wr_resources(void *, int);
106 
107 static int in6_ifaddr_gen;
108 static eventhandler_tag ifaddr_evhandler;
109 static struct timeout_task clip_task;
110 
111 struct toepcb *
112 alloc_toepcb(struct vi_info *vi, int txqid, int rxqid, int flags)
113 {
114 	struct port_info *pi = vi->pi;
115 	struct adapter *sc = pi->adapter;
116 	struct toepcb *toep;
117 	int tx_credits, txsd_total, len;
118 
119 	/*
120 	 * The firmware counts tx work request credits in units of 16 bytes
121 	 * each.  Reserve room for an ABORT_REQ so the driver never has to worry
122 	 * about tx credits if it wants to abort a connection.
123 	 */
124 	tx_credits = sc->params.ofldq_wr_cred;
125 	tx_credits -= howmany(sizeof(struct cpl_abort_req), 16);
126 
127 	/*
128 	 * The shortest possible tx work request is a fw_ofld_tx_data_wr plus
129 	 * 1 byte of immediate payload, and credits are counted in units of 16
130 	 * bytes.  Calculate the maximum number of work requests possible.
131 	 */
132 	txsd_total = tx_credits /
133 	    howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16);
134 
135 	if (txqid < 0)
136 		txqid = (arc4random() % vi->nofldtxq) + vi->first_ofld_txq;
137 	KASSERT(txqid >= vi->first_ofld_txq &&
138 	    txqid < vi->first_ofld_txq + vi->nofldtxq,
139 	    ("%s: txqid %d for vi %p (first %d, n %d)", __func__, txqid, vi,
140 		vi->first_ofld_txq, vi->nofldtxq));
141 
142 	if (rxqid < 0)
143 		rxqid = (arc4random() % vi->nofldrxq) + vi->first_ofld_rxq;
144 	KASSERT(rxqid >= vi->first_ofld_rxq &&
145 	    rxqid < vi->first_ofld_rxq + vi->nofldrxq,
146 	    ("%s: rxqid %d for vi %p (first %d, n %d)", __func__, rxqid, vi,
147 		vi->first_ofld_rxq, vi->nofldrxq));
148 
149 	len = offsetof(struct toepcb, txsd) +
150 	    txsd_total * sizeof(struct ofld_tx_sdesc);
151 
152 	toep = malloc(len, M_CXGBE, M_ZERO | flags);
153 	if (toep == NULL)
154 		return (NULL);
155 
156 	refcount_init(&toep->refcount, 1);
157 	toep->td = sc->tom_softc;
158 	toep->vi = vi;
159 	toep->tx_total = tx_credits;
160 	toep->tx_credits = tx_credits;
161 	toep->ofld_txq = &sc->sge.ofld_txq[txqid];
162 	toep->ofld_rxq = &sc->sge.ofld_rxq[rxqid];
163 	toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
164 	mbufq_init(&toep->ulp_pduq, INT_MAX);
165 	mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX);
166 	toep->txsd_total = txsd_total;
167 	toep->txsd_avail = txsd_total;
168 	toep->txsd_pidx = 0;
169 	toep->txsd_cidx = 0;
170 	ddp_init_toep(toep);
171 
172 	return (toep);
173 }
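
/*
 * A worked example of the credit arithmetic above, with hypothetical sizes
 * (the real values come from the firmware and the shared headers), where
 * howmany(x, y) is round-up division:
 *
 *	ofldq_wr_cred = 548
 *	howmany(sizeof(struct cpl_abort_req), 16) = howmany(24, 16) = 2
 *	tx_credits = 548 - 2 = 546
 *	howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16) = howmany(25, 16) = 2
 *	txsd_total = 546 / 2 = 273 tx software descriptors
 */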
174 
175 struct toepcb *
176 hold_toepcb(struct toepcb *toep)
177 {
178 
179 	refcount_acquire(&toep->refcount);
180 	return (toep);
181 }
182 
183 void
184 free_toepcb(struct toepcb *toep)
185 {
186 
187 	if (refcount_release(&toep->refcount) == 0)
188 		return;
189 
190 	KASSERT(!(toep->flags & TPF_ATTACHED),
191 	    ("%s: attached to an inpcb", __func__));
192 	KASSERT(!(toep->flags & TPF_CPL_PENDING),
193 	    ("%s: CPL pending", __func__));
194 
195 	ddp_uninit_toep(toep);
196 	free(toep, M_CXGBE);
197 }
198 
199 /*
200  * Set up the socket for TCP offload.
201  */
202 void
203 offload_socket(struct socket *so, struct toepcb *toep)
204 {
205 	struct tom_data *td = toep->td;
206 	struct inpcb *inp = sotoinpcb(so);
207 	struct tcpcb *tp = intotcpcb(inp);
208 	struct sockbuf *sb;
209 
210 	INP_WLOCK_ASSERT(inp);
211 
212 	/* Update socket */
213 	sb = &so->so_snd;
214 	SOCKBUF_LOCK(sb);
215 	sb->sb_flags |= SB_NOCOALESCE;
216 	SOCKBUF_UNLOCK(sb);
217 	sb = &so->so_rcv;
218 	SOCKBUF_LOCK(sb);
219 	sb->sb_flags |= SB_NOCOALESCE;
220 	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
221 		if (inp->inp_vflag & INP_IPV6)
222 			so->so_proto = &ddp6_protosw;
223 		else
224 			so->so_proto = &ddp_protosw;
225 	}
226 	SOCKBUF_UNLOCK(sb);
227 
228 	/* Update TCP PCB */
229 	tp->tod = &td->tod;
230 	tp->t_toe = toep;
231 	tp->t_flags |= TF_TOE;
232 
233 	/* Install an extra hold on inp */
234 	toep->inp = inp;
235 	toep->flags |= TPF_ATTACHED;
236 	in_pcbref(inp);
237 
238 	/* Add the TOE PCB to the active list */
239 	mtx_lock(&td->toep_list_lock);
240 	TAILQ_INSERT_HEAD(&td->toep_list, toep, link);
241 	mtx_unlock(&td->toep_list_lock);
242 }
243 
244 /* This is _not_ the normal way to "unoffload" a socket. */
245 void
246 undo_offload_socket(struct socket *so)
247 {
248 	struct inpcb *inp = sotoinpcb(so);
249 	struct tcpcb *tp = intotcpcb(inp);
250 	struct toepcb *toep = tp->t_toe;
251 	struct tom_data *td = toep->td;
252 	struct sockbuf *sb;
253 
254 	INP_WLOCK_ASSERT(inp);
255 
256 	sb = &so->so_snd;
257 	SOCKBUF_LOCK(sb);
258 	sb->sb_flags &= ~SB_NOCOALESCE;
259 	SOCKBUF_UNLOCK(sb);
260 	sb = &so->so_rcv;
261 	SOCKBUF_LOCK(sb);
262 	sb->sb_flags &= ~SB_NOCOALESCE;
263 	SOCKBUF_UNLOCK(sb);
264 
265 	tp->tod = NULL;
266 	tp->t_toe = NULL;
267 	tp->t_flags &= ~TF_TOE;
268 
269 	toep->inp = NULL;
270 	toep->flags &= ~TPF_ATTACHED;
271 	if (in_pcbrele_wlocked(inp))
272 		panic("%s: inp freed.", __func__);
273 
274 	mtx_lock(&td->toep_list_lock);
275 	TAILQ_REMOVE(&td->toep_list, toep, link);
276 	mtx_unlock(&td->toep_list_lock);
277 
278 	free_toepcb(toep);
279 }
280 
281 static void
282 release_offload_resources(struct toepcb *toep)
283 {
284 	struct tom_data *td = toep->td;
285 	struct adapter *sc = td_adapter(td);
286 	int tid = toep->tid;
287 
288 	KASSERT(!(toep->flags & TPF_CPL_PENDING),
289 	    ("%s: %p has CPL pending.", __func__, toep));
290 	KASSERT(!(toep->flags & TPF_ATTACHED),
291 	    ("%s: %p is still attached.", __func__, toep));
292 
293 	CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)",
294 	    __func__, toep, tid, toep->l2te, toep->ce);
295 
296 	/*
297 	 * These queues should have been emptied at approximately the same time
298 	 * that a normal connection's socket's so_snd would have been purged or
299 	 * drained.  Do _not_ clean up here.
300 	 */
301 	MPASS(mbufq_len(&toep->ulp_pduq) == 0);
302 	MPASS(mbufq_len(&toep->ulp_pdu_reclaimq) == 0);
303 #ifdef INVARIANTS
304 	ddp_assert_empty(toep);
305 #endif
306 
307 	if (toep->l2te)
308 		t4_l2t_release(toep->l2te);
309 
310 	if (tid >= 0) {
311 		remove_tid(sc, tid);
312 		release_tid(sc, tid, toep->ctrlq);
313 	}
314 
315 	if (toep->ce)
316 		release_lip(td, toep->ce);
317 
318 	mtx_lock(&td->toep_list_lock);
319 	TAILQ_REMOVE(&td->toep_list, toep, link);
320 	mtx_unlock(&td->toep_list_lock);
321 
322 	free_toepcb(toep);
323 }
324 
325 /*
326  * The kernel is done with the TCP PCB and this is our opportunity to unhook the
327  * toepcb hanging off of it.  If the TOE driver is also done with the toepcb (no
328  * pending CPL) then it is time to release all resources tied to the toepcb.
329  *
330  * Also gets called when an offloaded active open fails and the TOM wants the
331  * kernel to take the TCP PCB back.
332  */
333 static void
334 t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
335 {
336 #if defined(KTR) || defined(INVARIANTS)
337 	struct inpcb *inp = tp->t_inpcb;
338 #endif
339 	struct toepcb *toep = tp->t_toe;
340 
341 	INP_WLOCK_ASSERT(inp);
342 
343 	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
344 	KASSERT(toep->flags & TPF_ATTACHED,
345 	    ("%s: not attached", __func__));
346 
347 #ifdef KTR
348 	if (tp->t_state == TCPS_SYN_SENT) {
349 		CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)",
350 		    __func__, toep->tid, toep, toep->flags, inp,
351 		    inp->inp_flags);
352 	} else {
353 		CTR6(KTR_CXGBE,
354 		    "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)",
355 		    toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp,
356 		    inp->inp_flags);
357 	}
358 #endif
359 
360 	tp->t_toe = NULL;
361 	tp->t_flags &= ~TF_TOE;
362 	toep->flags &= ~TPF_ATTACHED;
363 
364 	if (!(toep->flags & TPF_CPL_PENDING))
365 		release_offload_resources(toep);
366 }
367 
368 /*
369  * setsockopt handler.
370  */
371 static void
372 t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name)
373 {
374 	struct adapter *sc = tod->tod_softc;
375 	struct toepcb *toep = tp->t_toe;
376 
377 	if (dir == SOPT_GET)
378 		return;
379 
380 	CTR4(KTR_CXGBE, "%s: tp %p, dir %u, name %u", __func__, tp, dir, name);
381 
382 	switch (name) {
383 	case TCP_NODELAY:
384 		t4_set_tcb_field(sc, toep, 1, W_TCB_T_FLAGS, V_TF_NAGLE(1),
385 		    V_TF_NAGLE(tp->t_flags & TF_NODELAY ? 0 : 1));
386 		break;
387 	default:
388 		break;
389 	}
390 }
391 
392 /*
393  * The TOE driver will not receive any more CPLs for the tid associated with the
394  * toepcb; release the hold on the inpcb.
395  */
396 void
397 final_cpl_received(struct toepcb *toep)
398 {
399 	struct inpcb *inp = toep->inp;
400 
401 	KASSERT(inp != NULL, ("%s: inp is NULL", __func__));
402 	INP_WLOCK_ASSERT(inp);
403 	KASSERT(toep->flags & TPF_CPL_PENDING,
404 	    ("%s: CPL not pending already?", __func__));
405 
406 	CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)",
407 	    __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags);
408 
409 	if (toep->ulp_mode == ULP_MODE_TCPDDP)
410 		release_ddp_resources(toep);
411 	toep->inp = NULL;
412 	toep->flags &= ~TPF_CPL_PENDING;
413 	mbufq_drain(&toep->ulp_pdu_reclaimq);
414 
415 	if (!(toep->flags & TPF_ATTACHED))
416 		release_offload_resources(toep);
417 
418 	if (!in_pcbrele_wlocked(inp))
419 		INP_WUNLOCK(inp);
420 }
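
/*
 * Note that teardown is a two-party hand-off: t4_pcb_detach clears
 * TPF_ATTACHED and final_cpl_received clears TPF_CPL_PENDING, and whichever
 * of the two runs second finds the other flag already clear and calls
 * release_offload_resources.  The toepcb is therefore released exactly
 * once, no matter which side finishes first.
 */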
421 
422 void
423 insert_tid(struct adapter *sc, int tid, void *ctx)
424 {
425 	struct tid_info *t = &sc->tids;
426 
427 	t->tid_tab[tid] = ctx;
428 	atomic_add_int(&t->tids_in_use, 1);
429 }
430 
431 void *
432 lookup_tid(struct adapter *sc, int tid)
433 {
434 	struct tid_info *t = &sc->tids;
435 
436 	return (t->tid_tab[tid]);
437 }
438 
439 void
440 update_tid(struct adapter *sc, int tid, void *ctx)
441 {
442 	struct tid_info *t = &sc->tids;
443 
444 	t->tid_tab[tid] = ctx;
445 }
446 
447 void
448 remove_tid(struct adapter *sc, int tid)
449 {
450 	struct tid_info *t = &sc->tids;
451 
452 	t->tid_tab[tid] = NULL;
453 	atomic_subtract_int(&t->tids_in_use, 1);
454 }
455 
456 void
457 release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
458 {
459 	struct wrqe *wr;
460 	struct cpl_tid_release *req;
461 
462 	wr = alloc_wrqe(sizeof(*req), ctrlq);
463 	if (wr == NULL) {
464 		queue_tid_release(sc, tid);	/* defer */
465 		return;
466 	}
467 	req = wrtod(wr);
468 
469 	INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
470 
471 	t4_wrq_tx(sc, wr);
472 }
473 
474 static void
475 queue_tid_release(struct adapter *sc, int tid)
476 {
477 
478 	CXGBE_UNIMPLEMENTED("deferred tid release");
479 }
480 
481 /*
482  * What mtu_idx to use, given a 4-tuple and/or an MSS cap
483  */
484 int
485 find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc, int pmss)
486 {
487 	unsigned short *mtus = &sc->params.mtus[0];
488 	int i, mss, n;
489 
490 	KASSERT(inc != NULL || pmss > 0,
491 	    ("%s: at least one of inc/pmss must be specified", __func__));
492 
493 	mss = inc ? tcp_mssopt(inc) : pmss;
494 	if (pmss > 0 && mss > pmss)
495 		mss = pmss;
496 
497 	if (inc != NULL && (inc->inc_flags & INC_ISIPV6))
498 		n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
499 	else
500 		n = sizeof(struct ip) + sizeof(struct tcphdr);
501 
502 	for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mss + n; i++)
503 		continue;
504 
505 	return (i);
506 }
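
/*
 * For example, with a hypothetical MTU table of
 * { 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, ... } and an IPv4
 * peer advertising an MSS of 1460, the loop above keeps advancing while
 * mtus[i + 1] <= 1460 + 40 (TCP + IP headers) and stops with i at the entry
 * for 1500, the largest MTU that the payload and headers still fit in.
 */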
507 
508 /*
509  * Determine the receive window size for a socket.
510  */
511 u_long
512 select_rcv_wnd(struct socket *so)
513 {
514 	unsigned long wnd;
515 
516 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
517 
518 	wnd = sbspace(&so->so_rcv);
519 	if (wnd < MIN_RCV_WND)
520 		wnd = MIN_RCV_WND;
521 
522 	return (min(wnd, MAX_RCV_WND));
523 }
524 
525 int
526 select_rcv_wscale(void)
527 {
528 	int wscale = 0;
529 	unsigned long space = sb_max;
530 
531 	if (space > MAX_RCV_WND)
532 		space = MAX_RCV_WND;
533 
534 	while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space)
535 		wscale++;
536 
537 	return (wscale);
538 }
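
/*
 * For example, with sb_max = 1MB (and MAX_RCV_WND at least that large), the
 * loop above settles on wscale = 5: 65535 << 4 = 1048560 falls just short
 * of 1048576, while 65535 << 5 covers it.
 */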
539 
540 extern int always_keepalive;
541 #define VIID_SMACIDX(v)	(((unsigned int)(v) & 0x7f) << 1)
542 
543 /*
544  * The socket ("so") may be a listening socket as well.
545  */
546 uint64_t
547 calc_opt0(struct socket *so, struct vi_info *vi, struct l2t_entry *e,
548     int mtu_idx, int rscale, int rx_credits, int ulp_mode)
549 {
550 	uint64_t opt0;
551 
552 	KASSERT(rx_credits <= M_RCV_BUFSIZ,
553 	    ("%s: rcv_bufsiz too high", __func__));
554 
555 	opt0 = F_TCAM_BYPASS | V_WND_SCALE(rscale) | V_MSS_IDX(mtu_idx) |
556 	    V_ULP_MODE(ulp_mode) | V_RCV_BUFSIZ(rx_credits);
557 
558 	if (so != NULL) {
559 		struct inpcb *inp = sotoinpcb(so);
560 		struct tcpcb *tp = intotcpcb(inp);
561 		int keepalive = always_keepalive ||
562 		    so_options_get(so) & SO_KEEPALIVE;
563 
564 		opt0 |= V_NAGLE((tp->t_flags & TF_NODELAY) == 0);
565 		opt0 |= V_KEEP_ALIVE(keepalive != 0);
566 	}
567 
568 	if (e != NULL)
569 		opt0 |= V_L2T_IDX(e->idx);
570 
571 	if (vi != NULL) {
572 		opt0 |= V_SMAC_SEL(VIID_SMACIDX(vi->viid));
573 		opt0 |= V_TX_CHAN(vi->pi->tx_chan);
574 	}
575 
576 	return (htobe64(opt0));
577 }
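
/*
 * An illustrative caller (a hypothetical fragment; the actual call sites
 * are in the active and passive open paths):
 */
#if 0
	cpl->opt0 = calc_opt0(so, vi, e, mtu_idx, rscale, rx_credits,
	    ULP_MODE_NONE);	/* returned big-endian, stored as-is */
#endif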
578 
579 uint64_t
580 select_ntuple(struct vi_info *vi, struct l2t_entry *e)
581 {
582 	struct adapter *sc = vi->pi->adapter;
583 	struct tp_params *tp = &sc->params.tp;
584 	uint16_t viid = vi->viid;
585 	uint64_t ntuple = 0;
586 
587 	/*
588 	 * Initialize each of the fields we care about that are present in
589 	 * the Compressed Filter Tuple.
590 	 */
591 	if (tp->vlan_shift >= 0 && e->vlan != CPL_L2T_VLAN_NONE)
592 		ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;
593 
594 	if (tp->port_shift >= 0)
595 		ntuple |= (uint64_t)e->lport << tp->port_shift;
596 
597 	if (tp->protocol_shift >= 0)
598 		ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;
599 
600 	if (tp->vnic_shift >= 0) {
601 		uint32_t vf = G_FW_VIID_VIN(viid);
602 		uint32_t pf = G_FW_VIID_PFN(viid);
603 		uint32_t vld = G_FW_VIID_VIVLD(viid);
604 
605 		ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vf) | V_FT_VNID_ID_PF(pf) |
606 		    V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
607 	}
608 
609 	if (is_t4(sc))
610 		return (htobe32((uint32_t)ntuple));
611 	else
612 		return (htobe64(V_FILTER_TUPLE(ntuple)));
613 }
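
/*
 * The tuple is assembled by shifting each enabled field into the position
 * the chip was configured with.  With hypothetical shifts (vlan 44, port
 * 41, protocol 38, vnic 17), a tuple would look like:
 *
 *	ntuple = VLAN << 44 | lport << 41 | IPPROTO_TCP << 38 | VNID << 17
 *
 * T4 carries the compressed tuple in a 32-bit field while T5's is wider,
 * hence the different byte-swaps above.
 */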
614 
615 void
616 set_tcpddp_ulp_mode(struct toepcb *toep)
617 {
618 
619 	toep->ulp_mode = ULP_MODE_TCPDDP;
620 	toep->ddp_flags = DDP_OK;
621 }
622 
623 int
624 negative_advice(int status)
625 {
626 
627 	return (status == CPL_ERR_RTX_NEG_ADVICE ||
628 	    status == CPL_ERR_PERSIST_NEG_ADVICE ||
629 	    status == CPL_ERR_KEEPALV_NEG_ADVICE);
630 }
631 
632 static int
633 alloc_tid_tabs(struct tid_info *t)
634 {
635 	size_t size;
636 	unsigned int i;
637 
638 	size = t->ntids * sizeof(*t->tid_tab) +
639 	    t->natids * sizeof(*t->atid_tab) +
640 	    t->nstids * sizeof(*t->stid_tab);
641 
642 	t->tid_tab = malloc(size, M_CXGBE, M_ZERO | M_NOWAIT);
643 	if (t->tid_tab == NULL)
644 		return (ENOMEM);
645 
646 	mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
647 	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
648 	t->afree = t->atid_tab;
649 	t->atids_in_use = 0;
650 	for (i = 1; i < t->natids; i++)
651 		t->atid_tab[i - 1].next = &t->atid_tab[i];
652 	t->atid_tab[t->natids - 1].next = NULL;
653 
654 	mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF);
655 	t->stid_tab = (struct listen_ctx **)&t->atid_tab[t->natids];
656 	t->stids_in_use = 0;
657 	TAILQ_INIT(&t->stids);
658 	t->nstids_free_head = t->nstids;
659 
660 	atomic_store_rel_int(&t->tids_in_use, 0);
661 
662 	return (0);
663 }
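
/*
 * The three tables above live in one contiguous allocation, carved up in
 * order, so free_tid_tabs below needs only a single free:
 *
 *	tid_tab[0 .. ntids - 1]		tid -> connection context
 *	atid_tab[0 .. natids - 1]	active-open entries (free list at afree)
 *	stid_tab[0 .. nstids - 1]	server tid -> listen_ctx
 */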
664 
665 static void
666 free_tid_tabs(struct tid_info *t)
667 {
668 	KASSERT(t->tids_in_use == 0,
669 	    ("%s: %d tids still in use.", __func__, t->tids_in_use));
670 	KASSERT(t->atids_in_use == 0,
671 	    ("%s: %d atids still in use.", __func__, t->atids_in_use));
672 	KASSERT(t->stids_in_use == 0,
673 	    ("%s: %d stids still in use.", __func__, t->stids_in_use));
674 
675 	free(t->tid_tab, M_CXGBE);
676 	t->tid_tab = NULL;
677 
678 	if (mtx_initialized(&t->atid_lock))
679 		mtx_destroy(&t->atid_lock);
680 	if (mtx_initialized(&t->stid_lock))
681 		mtx_destroy(&t->stid_lock);
682 }
683 
684 static int
685 add_lip(struct adapter *sc, struct in6_addr *lip)
686 {
687 	struct fw_clip_cmd c;
688 
689 	ASSERT_SYNCHRONIZED_OP(sc);
690 	/* mtx_assert(&td->clip_table_lock, MA_OWNED); */
691 
692 	memset(&c, 0, sizeof(c));
693 	c.op_to_write = htonl(V_FW_CMD_OP(FW_CLIP_CMD) | F_FW_CMD_REQUEST |
694 	    F_FW_CMD_WRITE);
695 	c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
696 	c.ip_hi = *(uint64_t *)&lip->s6_addr[0];
697 	c.ip_lo = *(uint64_t *)&lip->s6_addr[8];
698 
699 	return (-t4_wr_mbox_ns(sc, sc->mbox, &c, sizeof(c), &c));
700 }
701 
702 static int
703 delete_lip(struct adapter *sc, struct in6_addr *lip)
704 {
705 	struct fw_clip_cmd c;
706 
707 	ASSERT_SYNCHRONIZED_OP(sc);
708 	/* mtx_assert(&td->clip_table_lock, MA_OWNED); */
709 
710 	memset(&c, 0, sizeof(c));
711 	c.op_to_write = htonl(V_FW_CMD_OP(FW_CLIP_CMD) | F_FW_CMD_REQUEST |
712 	    F_FW_CMD_READ);
713 	c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
714 	c.ip_hi = *(uint64_t *)&lip->s6_addr[0];
715 	c.ip_lo = *(uint64_t *)&lip->s6_addr[8];
716 
717 	return (-t4_wr_mbox_ns(sc, sc->mbox, &c, sizeof(c), &c));
718 }
719 
720 static struct clip_entry *
721 search_lip(struct tom_data *td, struct in6_addr *lip)
722 {
723 	struct clip_entry *ce;
724 
725 	mtx_assert(&td->clip_table_lock, MA_OWNED);
726 
727 	TAILQ_FOREACH(ce, &td->clip_table, link) {
728 		if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip))
729 			return (ce);
730 	}
731 
732 	return (NULL);
733 }
734 
735 struct clip_entry *
736 hold_lip(struct tom_data *td, struct in6_addr *lip)
737 {
738 	struct clip_entry *ce;
739 
740 	mtx_lock(&td->clip_table_lock);
741 	ce = search_lip(td, lip);
742 	if (ce != NULL)
743 		ce->refcount++;
744 	mtx_unlock(&td->clip_table_lock);
745 
746 	return (ce);
747 }
748 
749 void
750 release_lip(struct tom_data *td, struct clip_entry *ce)
751 {
752 
753 	mtx_lock(&td->clip_table_lock);
754 	KASSERT(search_lip(td, &ce->lip) == ce,
755 	    ("%s: CLIP entry %p not in CLIP table.", __func__, ce));
756 	KASSERT(ce->refcount > 0,
757 	    ("%s: CLIP entry %p has refcount 0", __func__, ce));
758 	--ce->refcount;
759 	mtx_unlock(&td->clip_table_lock);
760 }
761 
762 static void
763 init_clip_table(struct adapter *sc, struct tom_data *td)
764 {
765 
766 	ASSERT_SYNCHRONIZED_OP(sc);
767 
768 	mtx_init(&td->clip_table_lock, "CLIP table lock", NULL, MTX_DEF);
769 	TAILQ_INIT(&td->clip_table);
770 	td->clip_gen = -1;
771 
772 	update_clip_table(sc, td);
773 }
774 
775 static void
776 update_clip(struct adapter *sc, void *arg __unused)
777 {
778 
779 	if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4tomuc"))
780 		return;
781 
782 	if (uld_active(sc, ULD_TOM))
783 		update_clip_table(sc, sc->tom_softc);
784 
785 	end_synchronized_op(sc, LOCK_HELD);
786 }
787 
788 static void
789 t4_clip_task(void *arg, int count)
790 {
791 
792 	t4_iterate(update_clip, NULL);
793 }
794 
795 static void
796 update_clip_table(struct adapter *sc, struct tom_data *td)
797 {
798 	struct rm_priotracker in6_ifa_tracker;
799 	struct in6_ifaddr *ia;
800 	struct in6_addr *lip, tlip;
801 	struct clip_head stale;
802 	struct clip_entry *ce, *ce_temp;
803 	int rc, gen = atomic_load_acq_int(&in6_ifaddr_gen);
804 
805 	ASSERT_SYNCHRONIZED_OP(sc);
806 
807 	IN6_IFADDR_RLOCK(&in6_ifa_tracker);
808 	mtx_lock(&td->clip_table_lock);
809 
810 	if (gen == td->clip_gen)
811 		goto done;
812 
813 	TAILQ_INIT(&stale);
814 	TAILQ_CONCAT(&stale, &td->clip_table, link);
815 
816 	TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
817 		lip = &ia->ia_addr.sin6_addr;
818 
819 		KASSERT(!IN6_IS_ADDR_MULTICAST(lip),
820 		    ("%s: mcast address in in6_ifaddr list", __func__));
821 
822 		if (IN6_IS_ADDR_LOOPBACK(lip))
823 			continue;
824 		if (IN6_IS_SCOPE_EMBED(lip)) {
825 			/* Remove the embedded scope */
826 			tlip = *lip;
827 			lip = &tlip;
828 			in6_clearscope(lip);
829 		}
830 		/*
831 		 * XXX: how to weed out the link local address for the loopback
832 		 * interface?  It's fe80::1 usually (always?).
833 		 */
834 
835 		/*
836 		 * If it's in the main list then we already know it's not stale.
837 		 */
838 		TAILQ_FOREACH(ce, &td->clip_table, link) {
839 			if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip))
840 				goto next;
841 		}
842 
843 		/*
844 		 * If it's in the stale list we should move it to the main list.
845 		 */
846 		TAILQ_FOREACH(ce, &stale, link) {
847 			if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip)) {
848 				TAILQ_REMOVE(&stale, ce, link);
849 				TAILQ_INSERT_TAIL(&td->clip_table, ce, link);
850 				goto next;
851 			}
852 		}
853 
854 		/* A new IP6 address; add it to the CLIP table */
855 		if ((ce = malloc(sizeof(*ce), M_CXGBE, M_ZERO | M_NOWAIT)) == NULL)
856 			continue;	/* XXX: retried on the next update */
857 		ce->lip = *lip;
858 		rc = add_lip(sc, lip);
859 		if (rc == 0)
860 			TAILQ_INSERT_TAIL(&td->clip_table, ce, link);
861 		else {
862 			char ip[INET6_ADDRSTRLEN];
863 
864 			inet_ntop(AF_INET6, &ce->lip, &ip[0], sizeof(ip));
865 			log(LOG_ERR, "%s: could not add %s (%d)\n",
866 			    __func__, ip, rc);
867 			free(ce, M_CXGBE);
868 		}
869 next:
870 		continue;
871 	}
872 
873 	/*
874 	 * Remove stale addresses (those no longer in V_in6_ifaddrhead) that are
875 	 * no longer referenced by the driver.
876 	 */
877 	TAILQ_FOREACH_SAFE(ce, &stale, link, ce_temp) {
878 		if (ce->refcount == 0) {
879 			rc = delete_lip(sc, &ce->lip);
880 			if (rc == 0) {
881 				TAILQ_REMOVE(&stale, ce, link);
882 				free(ce, M_CXGBE);
883 			} else {
884 				char ip[INET6_ADDRSTRLEN];
885 
886 				inet_ntop(AF_INET6, &ce->lip, &ip[0],
887 				    sizeof(ip));
888 				log(LOG_ERR, "%s: could not delete %s (%d)\n",
889 				    __func__, ip, rc);
890 			}
891 		}
892 	}
893 	/* The ones that are still referenced need to stay in the CLIP table */
894 	TAILQ_CONCAT(&td->clip_table, &stale, link);
895 
896 	td->clip_gen = gen;
897 done:
898 	mtx_unlock(&td->clip_table_lock);
899 	IN6_IFADDR_RUNLOCK(&in6_ifa_tracker);
900 }
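
/*
 * The in6_ifaddr_gen generation counter is what keeps the rescan above
 * cheap: t4_tom_ifaddr_event bumps it on every address change, and the
 * comparison with td->clip_gen at the top lets back-to-back events collapse
 * into a single walk of the address list.
 */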
901 
902 static void
903 destroy_clip_table(struct adapter *sc, struct tom_data *td)
904 {
905 	struct clip_entry *ce, *ce_temp;
906 
907 	if (mtx_initialized(&td->clip_table_lock)) {
908 		mtx_lock(&td->clip_table_lock);
909 		TAILQ_FOREACH_SAFE(ce, &td->clip_table, link, ce_temp) {
910 			KASSERT(ce->refcount == 0,
911 			    ("%s: CLIP entry %p still in use (%d)", __func__,
912 			    ce, ce->refcount));
913 			TAILQ_REMOVE(&td->clip_table, ce, link);
914 			delete_lip(sc, &ce->lip);
915 			free(ce, M_CXGBE);
916 		}
917 		mtx_unlock(&td->clip_table_lock);
918 		mtx_destroy(&td->clip_table_lock);
919 	}
920 }
921 
922 static void
923 free_tom_data(struct adapter *sc, struct tom_data *td)
924 {
925 
926 	ASSERT_SYNCHRONIZED_OP(sc);
927 
928 	KASSERT(TAILQ_EMPTY(&td->toep_list),
929 	    ("%s: TOE PCB list is not empty.", __func__));
930 	KASSERT(td->lctx_count == 0,
931 	    ("%s: lctx hash table is not empty.", __func__));
932 
933 	t4_uninit_l2t_cpl_handlers(sc);
934 	t4_uninit_cpl_io_handlers(sc);
935 	t4_uninit_ddp(sc, td);
936 	destroy_clip_table(sc, td);
937 
938 	if (td->listen_mask != 0)
939 		hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);
940 
941 	if (mtx_initialized(&td->unsent_wr_lock))
942 		mtx_destroy(&td->unsent_wr_lock);
943 	if (mtx_initialized(&td->lctx_hash_lock))
944 		mtx_destroy(&td->lctx_hash_lock);
945 	if (mtx_initialized(&td->toep_list_lock))
946 		mtx_destroy(&td->toep_list_lock);
947 
948 	free_tid_tabs(&sc->tids);
949 	free(td, M_CXGBE);
950 }
951 
952 static void
953 reclaim_wr_resources(void *arg, int count)
954 {
955 	struct tom_data *td = arg;
956 	STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list);
957 	struct cpl_act_open_req *cpl;
958 	u_int opcode, atid;
959 	struct wrqe *wr;
960 	struct adapter *sc;
961 
962 	mtx_lock(&td->unsent_wr_lock);
963 	STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
964 	mtx_unlock(&td->unsent_wr_lock);
965 
966 	while ((wr = STAILQ_FIRST(&twr_list)) != NULL) {
967 		STAILQ_REMOVE_HEAD(&twr_list, link);
968 
969 		cpl = wrtod(wr);
970 		opcode = GET_OPCODE(cpl);
971 
972 		switch (opcode) {
973 		case CPL_ACT_OPEN_REQ:
974 		case CPL_ACT_OPEN_REQ6:
975 			atid = G_TID_TID(be32toh(OPCODE_TID(cpl)));
976 			sc = td_adapter(td);
977 
978 			CTR2(KTR_CXGBE, "%s: atid %u", __func__, atid);
979 			act_open_failure_cleanup(sc, atid, EHOSTUNREACH);
980 			free(wr, M_CXGBE);
981 			break;
982 		default:
983 			log(LOG_ERR, "%s: leaked work request %p, wr_len %d, "
984 			    "opcode %x\n", __func__, wr, wr->wr_len, opcode);
985 			/* WR not freed here; go look at it with a debugger.  */
986 		}
987 	}
988 }
989 
990 /*
991  * Ground control to Major TOM
992  * Commencing countdown, engines on
993  */
994 static int
995 t4_tom_activate(struct adapter *sc)
996 {
997 	struct tom_data *td;
998 	struct toedev *tod;
999 	struct vi_info *vi;
1000 	int i, rc, v;
1001 
1002 	ASSERT_SYNCHRONIZED_OP(sc);
1003 
1004 	/* per-adapter softc for TOM */
1005 	td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT);
1006 	if (td == NULL)
1007 		return (ENOMEM);
1008 
1009 	/* List of TOE PCBs and associated lock */
1010 	mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
1011 	TAILQ_INIT(&td->toep_list);
1012 
1013 	/* Listen context */
1014 	mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
1015 	td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
1016 	    &td->listen_mask, HASH_NOWAIT);
1017 
1018 	/* List of WRs for which L2 resolution failed */
1019 	mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF);
1020 	STAILQ_INIT(&td->unsent_wr_list);
1021 	TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td);
1022 
1023 	/* TID tables */
1024 	rc = alloc_tid_tabs(&sc->tids);
1025 	if (rc != 0)
1026 		goto done;
1027 
1028 	/* DDP page pods and CPL handlers */
1029 	t4_init_ddp(sc, td);
1030 
1031 	/* CLIP table for IPv6 offload */
1032 	init_clip_table(sc, td);
1033 
1034 	/* CPL handlers */
1035 	t4_init_connect_cpl_handlers(sc);
1036 	t4_init_l2t_cpl_handlers(sc);
1037 	t4_init_listen_cpl_handlers(sc);
1038 	t4_init_cpl_io_handlers(sc);
1039 
1040 	/* toedev ops */
1041 	tod = &td->tod;
1042 	init_toedev(tod);
1043 	tod->tod_softc = sc;
1044 	tod->tod_connect = t4_connect;
1045 	tod->tod_listen_start = t4_listen_start;
1046 	tod->tod_listen_stop = t4_listen_stop;
1047 	tod->tod_rcvd = t4_rcvd;
1048 	tod->tod_output = t4_tod_output;
1049 	tod->tod_send_rst = t4_send_rst;
1050 	tod->tod_send_fin = t4_send_fin;
1051 	tod->tod_pcb_detach = t4_pcb_detach;
1052 	tod->tod_l2_update = t4_l2_update;
1053 	tod->tod_syncache_added = t4_syncache_added;
1054 	tod->tod_syncache_removed = t4_syncache_removed;
1055 	tod->tod_syncache_respond = t4_syncache_respond;
1056 	tod->tod_offload_socket = t4_offload_socket;
1057 	tod->tod_ctloutput = t4_ctloutput;
1058 
1059 	for_each_port(sc, i) {
1060 		for_each_vi(sc->port[i], v, vi) {
1061 			TOEDEV(vi->ifp) = &td->tod;
1062 		}
1063 	}
1064 
1065 	sc->tom_softc = td;
1066 	register_toedev(sc->tom_softc);
1067 
1068 done:
1069 	if (rc != 0)
1070 		free_tom_data(sc, td);
1071 	return (rc);
1072 }
1073 
1074 static int
1075 t4_tom_deactivate(struct adapter *sc)
1076 {
1077 	int rc = 0;
1078 	struct tom_data *td = sc->tom_softc;
1079 
1080 	ASSERT_SYNCHRONIZED_OP(sc);
1081 
1082 	if (td == NULL)
1083 		return (0);	/* XXX. KASSERT? */
1084 
1085 	if (sc->offload_map != 0)
1086 		return (EBUSY);	/* at least one port has IFCAP_TOE enabled */
1087 
1088 	if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI))
1089 		return (EBUSY);	/* both iWARP and iSCSI rely on the TOE. */
1090 
1091 	mtx_lock(&td->toep_list_lock);
1092 	if (!TAILQ_EMPTY(&td->toep_list))
1093 		rc = EBUSY;
1094 	mtx_unlock(&td->toep_list_lock);
1095 
1096 	mtx_lock(&td->lctx_hash_lock);
1097 	if (td->lctx_count > 0)
1098 		rc = EBUSY;
1099 	mtx_unlock(&td->lctx_hash_lock);
1100 
1101 	taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
1102 	mtx_lock(&td->unsent_wr_lock);
1103 	if (!STAILQ_EMPTY(&td->unsent_wr_list))
1104 		rc = EBUSY;
1105 	mtx_unlock(&td->unsent_wr_lock);
1106 
1107 	if (rc == 0) {
1108 		unregister_toedev(sc->tom_softc);
1109 		free_tom_data(sc, td);
1110 		sc->tom_softc = NULL;
1111 	}
1112 
1113 	return (rc);
1114 }
1115 
1116 static void
1117 t4_tom_ifaddr_event(void *arg __unused, struct ifnet *ifp)
1118 {
1119 
1120 	atomic_add_rel_int(&in6_ifaddr_gen, 1);
1121 	taskqueue_enqueue_timeout(taskqueue_thread, &clip_task, -hz / 4);
1122 }
1123 
1124 static int
1125 t4_tom_mod_load(void)
1126 {
1127 	int rc;
1128 	struct protosw *tcp_protosw, *tcp6_protosw;
1129 
1130 	rc = t4_ddp_mod_load();
1131 	if (rc != 0)
1132 		return (rc);
1133 
1134 	tcp_protosw = pffindproto(PF_INET, IPPROTO_TCP, SOCK_STREAM);
1135 	if (tcp_protosw == NULL)
1136 		return (ENOPROTOOPT);
1137 	bcopy(tcp_protosw, &ddp_protosw, sizeof(ddp_protosw));
1138 	bcopy(tcp_protosw->pr_usrreqs, &ddp_usrreqs, sizeof(ddp_usrreqs));
1139 	ddp_usrreqs.pru_aio_queue = t4_aio_queue_ddp;
1140 	ddp_protosw.pr_usrreqs = &ddp_usrreqs;
1141 
1142 	tcp6_protosw = pffindproto(PF_INET6, IPPROTO_TCP, SOCK_STREAM);
1143 	if (tcp6_protosw == NULL)
1144 		return (ENOPROTOOPT);
1145 	bcopy(tcp6_protosw, &ddp6_protosw, sizeof(ddp6_protosw));
1146 	bcopy(tcp6_protosw->pr_usrreqs, &ddp6_usrreqs, sizeof(ddp6_usrreqs));
1147 	ddp6_usrreqs.pru_aio_queue = t4_aio_queue_ddp;
1148 	ddp6_protosw.pr_usrreqs = &ddp6_usrreqs;
1149 
1150 	TIMEOUT_TASK_INIT(taskqueue_thread, &clip_task, 0, t4_clip_task, NULL);
1151 	ifaddr_evhandler = EVENTHANDLER_REGISTER(ifaddr_event,
1152 	    t4_tom_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);
1153 
1154 	rc = t4_register_uld(&tom_uld_info);
1155 	if (rc != 0)
1156 		t4_tom_mod_unload();
1157 
1158 	return (rc);
1159 }
1160 
1161 static void
1162 tom_uninit(struct adapter *sc, void *arg __unused)
1163 {
1164 	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun"))
1165 		return;
1166 
1167 	/* Try to free resources (works only if no port has IFCAP_TOE) */
1168 	if (uld_active(sc, ULD_TOM))
1169 		t4_deactivate_uld(sc, ULD_TOM);
1170 
1171 	end_synchronized_op(sc, 0);
1172 }
1173 
1174 static int
1175 t4_tom_mod_unload(void)
1176 {
1177 	t4_iterate(tom_uninit, NULL);
1178 
1179 	if (t4_unregister_uld(&tom_uld_info) == EBUSY)
1180 		return (EBUSY);
1181 
1182 	if (ifaddr_evhandler) {
1183 		EVENTHANDLER_DEREGISTER(ifaddr_event, ifaddr_evhandler);
1184 		taskqueue_cancel_timeout(taskqueue_thread, &clip_task, NULL);
1185 	}
1186 
1187 	t4_ddp_mod_unload();
1188 
1189 	return (0);
1190 }
1191 #endif	/* TCP_OFFLOAD */
1192 
1193 static int
1194 t4_tom_modevent(module_t mod, int cmd, void *arg)
1195 {
1196 	int rc = 0;
1197 
1198 #ifdef TCP_OFFLOAD
1199 	switch (cmd) {
1200 	case MOD_LOAD:
1201 		rc = t4_tom_mod_load();
1202 		break;
1203 
1204 	case MOD_UNLOAD:
1205 		rc = t4_tom_mod_unload();
1206 		break;
1207 
1208 	default:
1209 		rc = EINVAL;
1210 	}
1211 #else
1212 	printf("t4_tom: compiled without TCP_OFFLOAD support.\n");
1213 	rc = EOPNOTSUPP;
1214 #endif
1215 	return (rc);
1216 }
1217 
1218 static moduledata_t t4_tom_moddata = {
1219 	"t4_tom",
1220 	t4_tom_modevent,
1221 	0
1222 };
1223 
1224 MODULE_VERSION(t4_tom, 1);
1225 MODULE_DEPEND(t4_tom, toecore, 1, 1, 1);
1226 MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1);
1227 DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY);
1228