xref: /freebsd/sys/dev/cxgbe/cxgbei/cxgbei.c (revision 41b9077ef675c307fd1112bcea26aa3aeba1bb91)
1 /*-
2  * Copyright (c) 2012 Chelsio Communications, Inc.
3  * All rights reserved.
4  *
5  * Chelsio T5xx iSCSI driver
6  *
7  * Written by: Sreenivasa Honnur <shonnur@chelsio.com>
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "opt_inet.h"
35 #include "opt_inet6.h"
36 
37 #include <sys/types.h>
38 #include <sys/param.h>
39 #include <sys/kernel.h>
40 #include <sys/module.h>
41 #include <sys/systm.h>
42 
43 #ifdef TCP_OFFLOAD
44 #include <sys/errno.h>
45 #include <sys/kthread.h>
46 #include <sys/smp.h>
47 #include <sys/socket.h>
48 #include <sys/socketvar.h>
49 #include <sys/mbuf.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/condvar.h>
53 
54 #include <netinet/in.h>
55 #include <netinet/in_pcb.h>
56 #include <netinet/toecore.h>
57 #include <netinet/tcp_var.h>
58 #include <netinet/tcp_fsm.h>
59 
60 #include <cam/scsi/scsi_all.h>
61 #include <cam/scsi/scsi_da.h>
62 #include <cam/ctl/ctl_io.h>
63 #include <cam/ctl/ctl.h>
64 #include <cam/ctl/ctl_backend.h>
65 #include <cam/ctl/ctl_error.h>
66 #include <cam/ctl/ctl_frontend.h>
67 #include <cam/ctl/ctl_debug.h>
68 #include <cam/ctl/ctl_ha.h>
69 #include <cam/ctl/ctl_ioctl.h>
70 
71 #include <dev/iscsi/icl.h>
72 #include <dev/iscsi/iscsi_proto.h>
73 #include <dev/iscsi/iscsi_ioctl.h>
74 #include <dev/iscsi/iscsi.h>
75 #include <cam/ctl/ctl_frontend_iscsi.h>
76 
77 #include <cam/cam.h>
78 #include <cam/cam_ccb.h>
79 #include <cam/cam_xpt.h>
80 #include <cam/cam_debug.h>
81 #include <cam/cam_sim.h>
82 #include <cam/cam_xpt_sim.h>
83 #include <cam/cam_xpt_periph.h>
84 #include <cam/cam_periph.h>
85 #include <cam/cam_compat.h>
86 #include <cam/scsi/scsi_message.h>
87 
88 #include "common/common.h"
89 #include "common/t4_msg.h"
90 #include "common/t4_regs.h"     /* for PCIE_MEM_ACCESS */
91 #include "tom/t4_tom.h"
92 #include "cxgbei.h"
93 #include "cxgbei_ulp2_ddp.h"
94 
95 static int worker_thread_count;
96 static struct cxgbei_worker_thread_softc *cwt_softc;
97 static struct proc *cxgbei_proc;
98 
99 /* XXXNP: these declarations belong in a header instead. */
100 struct icl_pdu *icl_cxgbei_new_pdu(int);
101 void icl_cxgbei_new_pdu_set_conn(struct icl_pdu *, struct icl_conn *);
102 void icl_cxgbei_conn_pdu_free(struct icl_conn *, struct icl_pdu *);
103 
104 /*
105  * Direct Data Placement -
106  * Place the payload of an iSCSI Data-In or Data-Out PDU directly into a
107  * pre-posted, final destination host-memory buffer, selected by the
108  * Initiator Task Tag (ITT) in Data-In or the Target Task Tag (TTT) in
109  * Data-Out PDUs.
110  * The host memory address is programmed into the hardware as pagepod
111  * entries.  The location of a pagepod entry is encoded into the DDP tag,
112  * which is used as the base for the ITT/TTT.
113  */
114 
115 /*
116  * Functions to program pagepods in hardware.
117  */
118 static inline void
119 ppod_set(struct pagepod *ppod,
120 	struct cxgbei_ulp2_pagepod_hdr *hdr,
121 	struct cxgbei_ulp2_gather_list *gl,
122 	unsigned int pidx)
123 {
124 	int i;
125 
126 	memcpy(ppod, hdr, sizeof(*hdr));
127 
128 	for (i = 0; i < (PPOD_PAGES + 1); i++, pidx++) {
129 		ppod->addr[i] = pidx < gl->nelem ?
130 			cpu_to_be64(gl->dma_sg[pidx].phys_addr) : 0ULL;
131 	}
132 }
133 
134 static inline void
135 ppod_clear(struct pagepod *ppod)
136 {
137 	memset(ppod, 0, sizeof(*ppod));
138 }
139 
140 static inline void
141 ulp_mem_io_set_hdr(struct adapter *sc, int tid, struct ulp_mem_io *req,
142 		unsigned int wr_len, unsigned int dlen,
143 		unsigned int pm_addr)
144 {
145 	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);
146 
147 	INIT_ULPTX_WR(req, wr_len, 0, 0);
148 	req->cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
149 				V_ULP_MEMIO_ORDER(is_t4(sc)) |
150 				V_T5_ULP_MEMIO_IMM(is_t5(sc)));
151 	req->dlen = htonl(V_ULP_MEMIO_DATA_LEN(dlen >> 5));
152 	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)
153 				| V_FW_WR_FLOWID(tid));
154 	req->lock_addr = htonl(V_ULP_MEMIO_ADDR(pm_addr >> 5));
155 
156 	idata->cmd_more = htonl(V_ULPTX_CMD(ULP_TX_SC_IMM));
157 	idata->len = htonl(dlen);
158 }
159 
160 #define ULPMEM_IDATA_MAX_NPPODS 1	/* 256/PPOD_SIZE */
161 #define PCIE_MEMWIN_MAX_NPPODS 16	/* 1024/PPOD_SIZE */
162 
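/*
 * Write "npods" pagepods, starting at index "idx" in the adapter's pagepod
 * region, using a single ULP_TX work request with immediate data sent on the
 * connection's control queue.  A NULL hdr clears the pagepods instead of
 * setting them.
 */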
163 static int
164 ppod_write_idata(struct cxgbei_data *ci,
165 			struct cxgbei_ulp2_pagepod_hdr *hdr,
166 			unsigned int idx, unsigned int npods,
167 			struct cxgbei_ulp2_gather_list *gl,
168 			unsigned int gl_pidx, struct toepcb *toep)
169 {
170 	u_int dlen = PPOD_SIZE * npods;
171 	u_int pm_addr = idx * PPOD_SIZE + ci->llimit;
172 	u_int wr_len = roundup(sizeof(struct ulp_mem_io) +
173 	    sizeof(struct ulptx_idata) + dlen, 16);
174 	struct ulp_mem_io *req;
175 	struct ulptx_idata *idata;
176 	struct pagepod *ppod;
177 	u_int i;
178 	struct wrqe *wr;
179 	struct adapter *sc = toep->vi->pi->adapter;
180 
181 	wr = alloc_wrqe(wr_len, toep->ctrlq);
182 	if (wr == NULL) {
183 		CXGBE_UNIMPLEMENTED("ppod_write_idata: alloc_wrqe failure");
184 		return (ENOMEM);
185 	}
186 
187 	req = wrtod(wr);
188 	memset(req, 0, wr_len);
189 	ulp_mem_io_set_hdr(sc, toep->tid, req, wr_len, dlen, pm_addr);
190 	idata = (struct ulptx_idata *)(req + 1);
191 
192 	ppod = (struct pagepod *)(idata + 1);
193 	for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES) {
194 		if (!hdr) /* clear the pagepod */
195 			ppod_clear(ppod);
196 		else /* set the pagepod */
197 			ppod_set(ppod, hdr, gl, gl_pidx);
198 	}
199 
200 	t4_wrq_tx(sc, wr);
201 	return 0;
202 }
203 
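/*
 * Program the pagepods that map the gather list "gl" into adapter memory,
 * "npods" pods starting at pagepod index "idx", so that the buffer can be
 * used as a DDP destination for this connection.
 */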
204 int
205 t4_ddp_set_map(struct cxgbei_data *ci, void *iccp,
206     struct cxgbei_ulp2_pagepod_hdr *hdr, u_int idx, u_int npods,
207     struct cxgbei_ulp2_gather_list *gl, int reply)
208 {
209 	struct icl_cxgbei_conn *icc = (struct icl_cxgbei_conn *)iccp;
210 	struct toepcb *toep = icc->toep;
211 	int err;
212 	unsigned int pidx = 0, w_npods = 0, cnt;
213 
214 	/*
215 	 * On T4, mixing IMMD and DSGL in a ULP_MEM_WRITE does not guarantee
216 	 * ordering, so stick with immediate data (IMMD).
217 	 */
218 	gl->tid = toep->tid;
219 	gl->port_id = toep->vi->pi->port_id;
220 	gl->egress_dev = (void *)toep->vi->ifp;
221 
222 	/* send via immediate data */
223 	for (; w_npods < npods; idx += cnt, w_npods += cnt,
224 		pidx += PPOD_PAGES) {
225 		cnt = npods - w_npods;
226 		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
227 			cnt = ULPMEM_IDATA_MAX_NPPODS;
228 		err = ppod_write_idata(ci, hdr, idx, cnt, gl, pidx, toep);
229 		if (err) {
230 			printf("%s: ppod_write_idata failed\n", __func__);
231 			break;
232 		}
233 	}
234 	return err;
235 }
236 
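/*
 * Clear the pagepods that back a previously reserved DDP tag.
 */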
237 void
238 t4_ddp_clear_map(struct cxgbei_data *ci, struct cxgbei_ulp2_gather_list *gl,
239     u_int tag, u_int idx, u_int npods, struct icl_cxgbei_conn *icc)
240 {
241 	struct toepcb *toep = icc->toep;
242 	int err = -1;
243 	u_int pidx = 0;
244 	u_int w_npods = 0;
245 	u_int cnt;
246 
247 	for (; w_npods < npods; idx += cnt, w_npods += cnt,
248 		pidx += PPOD_PAGES) {
249 		cnt = npods - w_npods;
250 		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
251 			cnt = ULPMEM_IDATA_MAX_NPPODS;
252 		err = ppod_write_idata(ci, NULL, idx, cnt, gl, 0, toep);
253 		if (err)
254 			break;
255 	}
256 }
257 
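/*
 * Split the virtually contiguous data buffer of an initiator CCB into
 * page-sized cxgbei_sgl segments (the first and last segments may be partial
 * pages).  Returns the number of segments.
 */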
258 static int
259 cxgbei_map_sg(struct cxgbei_sgl *sgl, struct ccb_scsiio *csio)
260 {
261 	unsigned int data_len = csio->dxfer_len;
262 	unsigned int sgoffset = (uint64_t)csio->data_ptr & PAGE_MASK;
263 	unsigned int nsge;
264 	unsigned char *sgaddr = csio->data_ptr;
265 	unsigned int len = 0;
266 
267 	nsge = (csio->dxfer_len + sgoffset + PAGE_SIZE - 1) >> PAGE_SHIFT;
268 	sgl->sg_addr = sgaddr;
269 	sgl->sg_offset = sgoffset;
270 	if (data_len < (PAGE_SIZE - sgoffset))
271 		len = data_len;
272 	else
273 		len = PAGE_SIZE - sgoffset;
274 
275 	sgl->sg_length = len;
276 
277 	data_len -= len;
278 	sgaddr += len;
279 	sgl = sgl + 1;
280 
281 	while (data_len > 0) {
282 		sgl->sg_addr = sgaddr;
283 		len = (data_len < PAGE_SIZE) ? data_len : PAGE_SIZE;
284 		sgl->sg_length = len;
285 		sgaddr += len;
286 		data_len -= len;
287 		sgl = sgl + 1;
288 	}
289 
290 	return nsge;
291 }
292 
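/*
 * Target-side counterpart of cxgbei_map_sg: build the cxgbei_sgl from the
 * CTL I/O's kernel S/G list (or its single data buffer).  Returns the number
 * of page-sized segments.
 */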
293 static int
294 cxgbei_map_sg_tgt(struct cxgbei_sgl *sgl, union ctl_io *io)
295 {
296 	unsigned int data_len, sgoffset, nsge;
297 	unsigned char *sgaddr;
298 	unsigned int len = 0, index = 0, ctl_sg_count, i;
299 	struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
300 
301 	if (io->scsiio.kern_sg_entries > 0) {
302 		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
303 		ctl_sg_count = io->scsiio.kern_sg_entries;
304 	} else {
305 		ctl_sglist = &ctl_sg_entry;
306 		ctl_sglist->addr = io->scsiio.kern_data_ptr;
307 		ctl_sglist->len = io->scsiio.kern_data_len;
308 		ctl_sg_count = 1;
309 	}
310 
311 	sgaddr = sgl->sg_addr = ctl_sglist[index].addr;
312 	sgoffset = sgl->sg_offset = (uint64_t)sgl->sg_addr & PAGE_MASK;
313 	data_len = ctl_sglist[index].len;
314 
315 	if (data_len < (PAGE_SIZE - sgoffset))
316 		len = data_len;
317 	else
318 		len = PAGE_SIZE - sgoffset;
319 
320 	sgl->sg_length = len;
321 
322 	data_len -= len;
323 	sgaddr += len;
324 	sgl = sgl + 1;
325 
326 	len = 0;
327 	for (i = 0; i < ctl_sg_count; i++)
328 		len += ctl_sglist[i].len;
329 	nsge = (len + sgoffset + PAGE_SIZE - 1) >> PAGE_SHIFT;
330 	while (data_len > 0) {
331 		sgl->sg_addr = sgaddr;
332 		len = (data_len < PAGE_SIZE) ? data_len : PAGE_SIZE;
333 		sgl->sg_length = len;
334 		sgaddr += len;
335 		data_len -= len;
336 		sgl = sgl + 1;
337 		if (data_len == 0) {
338 			if (index == ctl_sg_count - 1)
339 				break;
340 			index++;
341 			sgaddr = ctl_sglist[index].addr;
342 			data_len = ctl_sglist[index].len;
343 		}
344 	}
345 
346 	return nsge;
347 }
348 
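/*
 * Build a DDP gather list for the buffer described by "sgl" and reserve a
 * DDP tag (and the pagepods behind it) for it.  The gather list is released
 * if the tag reservation fails.
 */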
349 static int
350 t4_sk_ddp_tag_reserve(struct cxgbei_data *ci, struct icl_cxgbei_conn *icc,
351     u_int xferlen, struct cxgbei_sgl *sgl, u_int sgcnt, u_int *ddp_tag)
352 {
353 	struct cxgbei_ulp2_gather_list *gl;
354 	int err = -EINVAL;
355 	struct toepcb *toep = icc->toep;
356 
357 	gl = cxgbei_ulp2_ddp_make_gl_from_iscsi_sgvec(xferlen, sgl, sgcnt, ci, 0);
358 	if (gl) {
359 		err = cxgbei_ulp2_ddp_tag_reserve(ci, icc, toep->tid,
360 		    &ci->tag_format, ddp_tag, gl, 0, 0);
361 		if (err) {
362 			cxgbei_ulp2_ddp_release_gl(ci, gl);
363 		}
364 	}
365 
366 	return err;
367 }
368 
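/*
 * Initiator: try to set up DDP for a Data-In transfer and return the tag to
 * use as the ITT.  Falls back to a non-DDP ("software") tag if the transfer
 * is too small or a DDP tag cannot be reserved.
 */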
369 static unsigned int
370 cxgbei_task_reserve_itt(struct icl_conn *ic, void **prv,
371 			struct ccb_scsiio *scmd, unsigned int *itt)
372 {
373 	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
374 	int xferlen = scmd->dxfer_len;
375 	struct cxgbei_task_data *tdata = NULL;
376 	struct cxgbei_sgl *sge = NULL;
377 	struct toepcb *toep = icc->toep;
378 	struct adapter *sc = td_adapter(toep->td);
379 	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
380 	int err = -1;
381 
382 	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
383 
384 	tdata = (struct cxgbei_task_data *)*prv;
385 	if (xferlen == 0 || tdata == NULL)
386 		goto out;
387 	if (xferlen < DDP_THRESHOLD)
388 		goto out;
389 
390 	if ((scmd->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
391 		tdata->nsge = cxgbei_map_sg(tdata->sgl, scmd);
392 		if (tdata->nsge == 0) {
393 			CTR1(KTR_CXGBE, "%s: map_sg failed", __func__);
394 			return 0;
395 		}
396 		sge = tdata->sgl;
397 
398 		tdata->sc_ddp_tag = *itt;
399 
400 		CTR3(KTR_CXGBE, "%s: *itt:0x%x sc_ddp_tag:0x%x",
401 				__func__, *itt, tdata->sc_ddp_tag);
402 		if (cxgbei_ulp2_sw_tag_usable(&ci->tag_format,
403 							tdata->sc_ddp_tag)) {
404 			err = t4_sk_ddp_tag_reserve(ci, icc, scmd->dxfer_len,
405 			    sge, tdata->nsge, &tdata->sc_ddp_tag);
406 		} else {
407 			CTR3(KTR_CXGBE,
408 				"%s: itt:0x%x sc_ddp_tag:0x%x not usable",
409 				__func__, *itt, tdata->sc_ddp_tag);
410 		}
411 	}
412 out:
413 	if (err < 0)
414 		tdata->sc_ddp_tag =
415 			cxgbei_ulp2_set_non_ddp_tag(&ci->tag_format, *itt);
416 
417 	return tdata->sc_ddp_tag;
418 }
419 
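/*
 * Target: try to set up DDP for a Data-Out transfer and return the tag to
 * use as the TTT.  Falls back to a non-DDP tag just like
 * cxgbei_task_reserve_itt.
 */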
420 static unsigned int
421 cxgbei_task_reserve_ttt(struct icl_conn *ic, void **prv, union ctl_io *io,
422 				unsigned int *ttt)
423 {
424 	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
425 	struct toepcb *toep = icc->toep;
426 	struct adapter *sc = td_adapter(toep->td);
427 	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
428 	struct cxgbei_task_data *tdata = NULL;
429 	int xferlen, err = -1;
430 	struct cxgbei_sgl *sge = NULL;
431 
432 	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
433 
434 	xferlen = (io->scsiio.kern_data_len - io->scsiio.ext_data_filled);
435 	tdata = (struct cxgbei_task_data *)*prv;
436 	if ((xferlen == 0) || (tdata == NULL))
437 		goto out;
438 	if (xferlen < DDP_THRESHOLD)
439 		goto out;
440 	tdata->nsge = cxgbei_map_sg_tgt(tdata->sgl, io);
441 	if (tdata->nsge == 0) {
442 		CTR1(KTR_CXGBE, "%s: map_sg failed", __func__);
443 		return 0;
444 	}
445 	sge = tdata->sgl;
446 
447 	tdata->sc_ddp_tag = *ttt;
448 	if (cxgbei_ulp2_sw_tag_usable(&ci->tag_format, tdata->sc_ddp_tag)) {
449 		err = t4_sk_ddp_tag_reserve(ci, icc, xferlen, sge,
450 		    tdata->nsge, &tdata->sc_ddp_tag);
451 	} else {
452 		CTR2(KTR_CXGBE, "%s: sc_ddp_tag:0x%x not usable",
453 				__func__, tdata->sc_ddp_tag);
454 	}
455 out:
456 	if (err < 0)
457 		tdata->sc_ddp_tag =
458 			cxgbei_ulp2_set_non_ddp_tag(&ci->tag_format, *ttt);
459 	return tdata->sc_ddp_tag;
460 }
461 
462 static int
463 t4_sk_ddp_tag_release(struct icl_cxgbei_conn *icc, unsigned int ddp_tag)
464 {
465 	struct toepcb *toep = icc->toep;
466 	struct adapter *sc = td_adapter(toep->td);
467 	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
468 
469 	cxgbei_ulp2_ddp_tag_release(ci, ddp_tag, icc);
470 
471 	return (0);
472 }
473 
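/*
 * Derive the largest tx and rx iSCSI PDUs the hardware can handle from the
 * payload-memory page sizes and the TP transfer-length limits, rounded down
 * to a multiple of 512.
 */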
474 static void
475 read_pdu_limits(struct adapter *sc, uint32_t *max_tx_pdu_len,
476     uint32_t *max_rx_pdu_len)
477 {
478 	uint32_t tx_len, rx_len, r, v;
479 
480 	rx_len = t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE);
481 	tx_len = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
482 
483 	r = t4_read_reg(sc, A_TP_PARA_REG2);
484 	rx_len = min(rx_len, G_MAXRXDATA(r));
485 	tx_len = min(tx_len, G_MAXRXDATA(r));
486 
487 	r = t4_read_reg(sc, A_TP_PARA_REG7);
488 	v = min(G_PMMAXXFERLEN0(r), G_PMMAXXFERLEN1(r));
489 	rx_len = min(rx_len, v);
490 	tx_len = min(tx_len, v);
491 
492 	/* Remove after FW_FLOWC_MNEM_TXDATAPLEN_MAX fix in firmware. */
493 	tx_len = min(tx_len, 3 * 4096);
494 
495 	*max_tx_pdu_len = rounddown2(tx_len, 512);
496 	*max_rx_pdu_len = rounddown2(rx_len, 512);
497 }
498 
499 /*
500  * Initialize the software state of the iSCSI ULP driver.
501  *
502  * ENXIO means firmware didn't set up something that it was supposed to.
503  */
504 static int
505 cxgbei_init(struct adapter *sc, struct cxgbei_data *ci)
506 {
507 	int nppods, bits, rc;
508 	static const u_int pgsz_order[] = {0, 1, 2, 3};
509 
510 	MPASS(sc->vres.iscsi.size > 0);
511 
512 	ci->llimit = sc->vres.iscsi.start;
513 	ci->ulimit = sc->vres.iscsi.start + sc->vres.iscsi.size - 1;
514 	read_pdu_limits(sc, &ci->max_tx_pdu_len, &ci->max_rx_pdu_len);
515 
516 	nppods = sc->vres.iscsi.size >> IPPOD_SIZE_SHIFT;
517 	if (nppods <= 1024)
518 		return (ENXIO);
519 
520 	bits = fls(nppods);
521 	if (bits > IPPOD_IDX_MAX_SIZE)
522 		bits = IPPOD_IDX_MAX_SIZE;
523 	nppods = (1 << (bits - 1)) - 1;
524 
525 	rc = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR,
526 	    BUS_SPACE_MAXADDR, NULL, NULL, UINT32_MAX, 8, BUS_SPACE_MAXSIZE,
527 	    BUS_DMA_ALLOCNOW, NULL, NULL, &ci->ulp_ddp_tag);
528 	if (rc != 0) {
529 		device_printf(sc->dev, "%s: failed to create DMA tag: %u.\n",
530 		    __func__, rc);
531 		return (rc);
532 	}
533 
534 	ci->colors = malloc(nppods * sizeof(char), M_CXGBE, M_NOWAIT | M_ZERO);
535 	ci->gl_map = malloc(nppods * sizeof(struct cxgbei_ulp2_gather_list *),
536 	    M_CXGBE, M_NOWAIT | M_ZERO);
537 	if (ci->colors == NULL || ci->gl_map == NULL) {
538 		bus_dma_tag_destroy(ci->ulp_ddp_tag);
539 		free(ci->colors, M_CXGBE);
540 		free(ci->gl_map, M_CXGBE);
541 		return (ENOMEM);
542 	}
543 
544 	mtx_init(&ci->map_lock, "ddp lock", NULL, MTX_DEF | MTX_DUPOK);
545 	ci->nppods = nppods;
546 	ci->idx_last = nppods;
547 	ci->idx_bits = bits;
548 	ci->idx_mask = (1 << bits) - 1;
549 	ci->rsvd_tag_mask = (1 << (bits + IPPOD_IDX_SHIFT)) - 1;
550 
551 	ci->tag_format.sw_bits = bits;
552 	ci->tag_format.rsvd_bits = bits;
553 	ci->tag_format.rsvd_shift = IPPOD_IDX_SHIFT;
554 	ci->tag_format.rsvd_mask = ci->idx_mask;
555 
556 	t4_iscsi_init(sc, ci->idx_mask << IPPOD_IDX_SHIFT, pgsz_order);
557 
558 	return (rc);
559 }
560 
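/*
 * CPL_ISCSI_HDR: the hardware has delivered the BHS of a new PDU.  Allocate
 * an icl_cxgbei_pdu, copy the BHS into it, and stash it in the toepcb until
 * the rest of the PDU (data and status) arrives.
 */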
561 static int
562 do_rx_iscsi_hdr(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
563 {
564 	struct adapter *sc = iq->adapter;
565 	struct cpl_iscsi_hdr *cpl = mtod(m, struct cpl_iscsi_hdr *);
566 	u_int tid = GET_TID(cpl);
567 	struct toepcb *toep = lookup_tid(sc, tid);
568 	struct icl_pdu *ip;
569 	struct icl_cxgbei_pdu *icp;
570 
571 	M_ASSERTPKTHDR(m);
572 
573 	ip = icl_cxgbei_new_pdu(M_NOWAIT);
574 	if (ip == NULL)
575 		CXGBE_UNIMPLEMENTED("PDU allocation failure");
576 	icp = ip_to_icp(ip);
577 	bcopy(mtod(m, caddr_t) + sizeof(*cpl), icp->ip.ip_bhs, sizeof(struct
578 	    iscsi_bhs));
579 	icp->icp_seq = ntohl(cpl->seq);
580 	icp->icp_flags = ICPF_RX_HDR;
581 
582 	/* This is the start of a new PDU.  There should be no old state. */
583 	MPASS(toep->ulpcb2 == NULL);
584 	toep->ulpcb2 = icp;
585 
586 #if 0
587 	CTR4(KTR_CXGBE, "%s: tid %u, cpl->len hlen %u, m->m_len hlen %u",
588 	    __func__, tid, ntohs(cpl->len), m->m_len);
589 #endif
590 
591 	m_freem(m);
592 	return (0);
593 }
594 
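/*
 * CPL_ISCSI_DATA: PDU payload that was not placed directly, delivered as
 * freelist data.  Attach the mbuf to the PDU started by CPL_ISCSI_HDR.
 */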
595 static int
596 do_rx_iscsi_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
597 {
598 	struct adapter *sc = iq->adapter;
599 	struct cpl_iscsi_data *cpl = mtod(m, struct cpl_iscsi_data *);
600 	u_int tid = GET_TID(cpl);
601 	struct toepcb *toep = lookup_tid(sc, tid);
602 	struct icl_cxgbei_pdu *icp = toep->ulpcb2;
603 
604 	M_ASSERTPKTHDR(m);
605 
606 	/* Must already have received the header (but not the data). */
607 	MPASS(icp != NULL);
608 	MPASS(icp->icp_flags == ICPF_RX_HDR);
609 	MPASS(icp->ip.ip_data_mbuf == NULL);
610 	MPASS(icp->ip.ip_data_len == 0);
611 
612 	m_adj(m, sizeof(*cpl));
613 
614 	icp->icp_flags |= ICPF_RX_FLBUF;
615 	icp->ip.ip_data_mbuf = m;
616 	icp->ip.ip_data_len = m->m_pkthdr.len;
617 
618 #if 0
619 	CTR4(KTR_CXGBE, "%s: tid %u, cpl->len dlen %u, m->m_len dlen %u",
620 	    __func__, tid, ntohs(cpl->len), m->m_len);
621 #endif
622 
623 	return (0);
624 }
625 
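/*
 * CPL_RX_ISCSI_DDP: final status for the PDU being assembled.  Record any
 * padding/CRC errors, advance the TCP sequence space by the length the
 * hardware consumed, and queue the completed PDU for a worker thread to hand
 * to ICL.
 */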
626 static int
627 do_rx_iscsi_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
628 {
629 	struct adapter *sc = iq->adapter;
630 	const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1);
631 	u_int tid = GET_TID(cpl);
632 	struct toepcb *toep = lookup_tid(sc, tid);
633 	struct inpcb *inp = toep->inp;
634 	struct socket *so;
635 	struct sockbuf *sb;
636 	struct tcpcb *tp;
637 	struct icl_cxgbei_conn *icc;
638 	struct icl_conn *ic;
639 	struct icl_cxgbei_pdu *icp = toep->ulpcb2;
640 	struct icl_pdu *ip;
641 	u_int pdu_len, val;
642 
643 	MPASS(m == NULL);
644 
645 	/* Must already be assembling a PDU. */
646 	MPASS(icp != NULL);
647 	MPASS(icp->icp_flags & ICPF_RX_HDR);	/* Data is optional. */
648 	ip = &icp->ip;
649 	icp->icp_flags |= ICPF_RX_STATUS;
650 	val = ntohl(cpl->ddpvld);
651 	if (val & F_DDP_PADDING_ERR)
652 		icp->icp_flags |= ICPF_PAD_ERR;
653 	if (val & F_DDP_HDRCRC_ERR)
654 		icp->icp_flags |= ICPF_HCRC_ERR;
655 	if (val & F_DDP_DATACRC_ERR)
656 		icp->icp_flags |= ICPF_DCRC_ERR;
657 	if (ip->ip_data_mbuf == NULL) {
658 		/* XXXNP: what should ip->ip_data_len be, and why? */
659 		icp->icp_flags |= ICPF_RX_DDP;
660 	}
661 	pdu_len = ntohs(cpl->len);	/* includes everything. */
662 
663 	INP_WLOCK(inp);
664 	if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))) {
665 		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
666 		    __func__, tid, pdu_len, inp->inp_flags);
667 		INP_WUNLOCK(inp);
668 		icl_cxgbei_conn_pdu_free(NULL, ip);
669 #ifdef INVARIANTS
670 		toep->ulpcb2 = NULL;
671 #endif
672 		return (0);
673 	}
674 
675 	tp = intotcpcb(inp);
676 	MPASS(icp->icp_seq == tp->rcv_nxt);
677 	MPASS(tp->rcv_wnd >= pdu_len);
678 	tp->rcv_nxt += pdu_len;
679 	tp->rcv_wnd -= pdu_len;
680 	tp->t_rcvtime = ticks;
681 
682 	/* update rx credits */
683 	toep->rx_credits += pdu_len;
684 	t4_rcvd(&toep->td->tod, tp);	/* XXX: sc->tom_softc.tod */
685 
686 	so = inp->inp_socket;
687 	sb = &so->so_rcv;
688 	SOCKBUF_LOCK(sb);
689 
690 	icc = toep->ulpcb;
691 	if (__predict_false(icc == NULL || sb->sb_state & SBS_CANTRCVMORE)) {
692 		CTR5(KTR_CXGBE,
693 		    "%s: tid %u, excess rx (%d bytes), icc %p, sb_state 0x%x",
694 		    __func__, tid, pdu_len, icc, sb->sb_state);
695 		SOCKBUF_UNLOCK(sb);
696 		INP_WUNLOCK(inp);
697 
698 		INP_INFO_RLOCK(&V_tcbinfo);
699 		INP_WLOCK(inp);
700 		tp = tcp_drop(tp, ECONNRESET);
701 		if (tp)
702 			INP_WUNLOCK(inp);
703 		INP_INFO_RUNLOCK(&V_tcbinfo);
704 
705 		icl_cxgbei_conn_pdu_free(NULL, ip);
706 #ifdef INVARIANTS
707 		toep->ulpcb2 = NULL;
708 #endif
709 		return (0);
710 	}
711 	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
712 	ic = &icc->ic;
713 	icl_cxgbei_new_pdu_set_conn(ip, ic);
714 
715 	MPASS(m == NULL); /* was unused, we'll use it now. */
716 	m = sbcut_locked(sb, sbused(sb)); /* XXXNP: toep->sb_cc accounting? */
717 	if (__predict_false(m != NULL)) {
718 		int len = m_length(m, NULL);
719 
720 		/*
721 		 * PDUs were received before the tid transitioned to ULP mode.
722 		 * Convert them to icl_cxgbei_pdus and send them to ICL before
723 		 * the PDU in icp/ip.
724 		 */
725 		CTR3(KTR_CXGBE, "%s: tid %u, %u bytes in so_rcv", __func__, tid,
726 		    len);
727 
728 		/* XXXNP: needs to be rewritten. */
729 		if (len == sizeof(struct iscsi_bhs) || len == 4 + sizeof(struct
730 		    iscsi_bhs)) {
731 			struct icl_cxgbei_pdu *icp0;
732 			struct icl_pdu *ip0;
733 
734 			ip0 = icl_cxgbei_new_pdu(M_NOWAIT);
735 			if (ip0 == NULL)
736 				CXGBE_UNIMPLEMENTED("PDU allocation failure");
737 			icl_cxgbei_new_pdu_set_conn(ip0, ic);
738 			icp0 = ip_to_icp(ip0);
739 			icp0->icp_seq = 0; /* XXX */
740 			icp0->icp_flags = ICPF_RX_HDR | ICPF_RX_STATUS;
741 			m_copydata(m, 0, sizeof(struct iscsi_bhs), (void *)ip0->ip_bhs);
742 			STAILQ_INSERT_TAIL(&icc->rcvd_pdus, ip0, ip_next);
743 		}
744 		m_freem(m);
745 	}
746 
747 #if 0
748 	CTR4(KTR_CXGBE, "%s: tid %u, pdu_len %u, pdu_flags 0x%x",
749 	    __func__, tid, pdu_len, icp->icp_flags);
750 #endif
751 
752 	STAILQ_INSERT_TAIL(&icc->rcvd_pdus, ip, ip_next);
753 	if ((icc->rx_flags & RXF_ACTIVE) == 0) {
754 		struct cxgbei_worker_thread_softc *cwt = &cwt_softc[icc->cwt];
755 
756 		mtx_lock(&cwt->cwt_lock);
757 		icc->rx_flags |= RXF_ACTIVE;
758 		TAILQ_INSERT_TAIL(&cwt->rx_head, icc, rx_link);
759 		if (cwt->cwt_state == CWT_SLEEPING) {
760 			cwt->cwt_state = CWT_RUNNING;
761 			cv_signal(&cwt->cwt_cv);
762 		}
763 		mtx_unlock(&cwt->cwt_lock);
764 	}
765 	SOCKBUF_UNLOCK(sb);
766 	INP_WUNLOCK(inp);
767 
768 #ifdef INVARIANTS
769 	toep->ulpcb2 = NULL;
770 #endif
771 
772 	return (0);
773 }
774 
775 /* initiator */
776 void
777 cxgbei_conn_task_reserve_itt(void *conn, void **prv,
778 				void *scmd, unsigned int *itt)
779 {
780 	unsigned int tag;
781 	tag = cxgbei_task_reserve_itt(conn, prv, scmd, itt);
782 	if (tag)
783 		*itt = htonl(tag);
784 	return;
785 }
786 
787 /* target */
788 void
789 cxgbei_conn_transfer_reserve_ttt(void *conn, void **prv,
790 				void *scmd, unsigned int *ttt)
791 {
792 	unsigned int tag;
793 	tag = cxgbei_task_reserve_ttt(conn, prv, scmd, ttt);
794 	if (tag)
795 		*ttt = htonl(tag);
796 	return;
797 }
798 
799 void
800 cxgbei_cleanup_task(void *conn, void *ofld_priv)
801 {
802 	struct icl_conn *ic = (struct icl_conn *)conn;
803 	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
804 	struct cxgbei_task_data *tdata = ofld_priv;
805 	struct adapter *sc = icc->sc;
806 	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
807 
808 	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
809 	MPASS(tdata != NULL);
810 
811 	if (cxgbei_ulp2_is_ddp_tag(&ci->tag_format, tdata->sc_ddp_tag))
812 		t4_sk_ddp_tag_release(icc, tdata->sc_ddp_tag);
813 	memset(tdata, 0, sizeof(*tdata));
814 }
815 
816 static int
817 cxgbei_activate(struct adapter *sc)
818 {
819 	struct cxgbei_data *ci;
820 	int rc;
821 
822 	ASSERT_SYNCHRONIZED_OP(sc);
823 
824 	if (uld_active(sc, ULD_ISCSI)) {
825 		KASSERT(0, ("%s: iSCSI offload already enabled on adapter %p",
826 		    __func__, sc));
827 		return (0);
828 	}
829 
830 	if (sc->iscsicaps == 0 || sc->vres.iscsi.size == 0) {
831 		device_printf(sc->dev,
832 		    "not iSCSI offload capable, or capability disabled.\n");
833 		return (ENOSYS);
834 	}
835 
836 	/* per-adapter softc for iSCSI */
837 	ci = malloc(sizeof(*ci), M_CXGBE, M_ZERO | M_NOWAIT);
838 	if (ci == NULL)
839 		return (ENOMEM);
840 
841 	rc = cxgbei_init(sc, ci);
842 	if (rc != 0) {
843 		free(ci, M_CXGBE);
844 		return (rc);
845 	}
846 
847 	sc->iscsi_ulp_softc = ci;
848 
849 	return (0);
850 }
851 
852 static int
853 cxgbei_deactivate(struct adapter *sc)
854 {
855 
856 	ASSERT_SYNCHRONIZED_OP(sc);
857 
858 	if (sc->iscsi_ulp_softc != NULL) {
859 		cxgbei_ddp_cleanup(sc->iscsi_ulp_softc);
860 		free(sc->iscsi_ulp_softc, M_CXGBE);
861 		sc->iscsi_ulp_softc = NULL;
862 	}
863 
864 	return (0);
865 }
866 
867 static void
868 cxgbei_activate_all(struct adapter *sc, void *arg __unused)
869 {
870 
871 	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4isact") != 0)
872 		return;
873 
874 	/* Activate iSCSI if any port on this adapter has IFCAP_TOE enabled. */
875 	if (sc->offload_map && !uld_active(sc, ULD_ISCSI))
876 		(void) t4_activate_uld(sc, ULD_ISCSI);
877 
878 	end_synchronized_op(sc, 0);
879 }
880 
881 static void
882 cxgbei_deactivate_all(struct adapter *sc, void *arg __unused)
883 {
884 
885 	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4isdea") != 0)
886 		return;
887 
888 	if (uld_active(sc, ULD_ISCSI))
889 		(void) t4_deactivate_uld(sc, ULD_ISCSI);
890 
891 	end_synchronized_op(sc, 0);
892 }
893 
894 static struct uld_info cxgbei_uld_info = {
895 	.uld_id = ULD_ISCSI,
896 	.activate = cxgbei_activate,
897 	.deactivate = cxgbei_deactivate,
898 };
899 
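/*
 * Main loop of an rx worker thread.  Connections with received PDUs are
 * queued on the thread's rx_head list; the thread hands each connection's
 * PDUs to ICL with the socket buffer unlocked and sleeps when the list is
 * empty.
 */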
900 static void
901 cwt_main(void *arg)
902 {
903 	struct cxgbei_worker_thread_softc *cwt = arg;
904 	struct icl_cxgbei_conn *icc = NULL;
905 	struct icl_conn *ic;
906 	struct icl_pdu *ip;
907 	struct sockbuf *sb;
908 	STAILQ_HEAD(, icl_pdu) rx_pdus = STAILQ_HEAD_INITIALIZER(rx_pdus);
909 
910 	MPASS(cwt != NULL);
911 
912 	mtx_lock(&cwt->cwt_lock);
913 	MPASS(cwt->cwt_state == 0);
914 	cwt->cwt_state = CWT_RUNNING;
915 	cv_signal(&cwt->cwt_cv);
916 
917 	while (__predict_true(cwt->cwt_state != CWT_STOP)) {
918 		cwt->cwt_state = CWT_RUNNING;
919 		while ((icc = TAILQ_FIRST(&cwt->rx_head)) != NULL) {
920 			TAILQ_REMOVE(&cwt->rx_head, icc, rx_link);
921 			mtx_unlock(&cwt->cwt_lock);
922 
923 			ic = &icc->ic;
924 			sb = &ic->ic_socket->so_rcv;
925 
926 			SOCKBUF_LOCK(sb);
927 			MPASS(icc->rx_flags & RXF_ACTIVE);
928 			if (__predict_true(!(sb->sb_state & SBS_CANTRCVMORE))) {
929 				MPASS(STAILQ_EMPTY(&rx_pdus));
930 				STAILQ_SWAP(&icc->rcvd_pdus, &rx_pdus, icl_pdu);
931 				SOCKBUF_UNLOCK(sb);
932 
933 				/* Hand over PDUs to ICL. */
934 				while ((ip = STAILQ_FIRST(&rx_pdus)) != NULL) {
935 					STAILQ_REMOVE_HEAD(&rx_pdus, ip_next);
936 					ic->ic_receive(ip);
937 				}
938 
939 				SOCKBUF_LOCK(sb);
940 				MPASS(STAILQ_EMPTY(&rx_pdus));
941 			}
942 			MPASS(icc->rx_flags & RXF_ACTIVE);
943 			if (STAILQ_EMPTY(&icc->rcvd_pdus) ||
944 			    __predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
945 				icc->rx_flags &= ~RXF_ACTIVE;
946 			} else {
947 				/*
948 				 * More PDUs were received while we were busy
949 				 * handing over the previous batch to ICL.
950 				 * Re-add this connection to the end of the
951 				 * queue.
952 				 */
953 				mtx_lock(&cwt->cwt_lock);
954 				TAILQ_INSERT_TAIL(&cwt->rx_head, icc,
955 				    rx_link);
956 				mtx_unlock(&cwt->cwt_lock);
957 			}
958 			SOCKBUF_UNLOCK(sb);
959 
960 			mtx_lock(&cwt->cwt_lock);
961 		}
962 
963 		/* Inner loop doesn't check for CWT_STOP, do that first. */
964 		if (__predict_false(cwt->cwt_state == CWT_STOP))
965 			break;
966 		cwt->cwt_state = CWT_SLEEPING;
967 		cv_wait(&cwt->cwt_cv, &cwt->cwt_lock);
968 	}
969 
970 	MPASS(TAILQ_FIRST(&cwt->rx_head) == NULL);
971 	mtx_assert(&cwt->cwt_lock, MA_OWNED);
972 	cwt->cwt_state = CWT_STOPPED;
973 	cv_signal(&cwt->cwt_cv);
974 	mtx_unlock(&cwt->cwt_lock);
975 	kthread_exit();
976 }
977 
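/*
 * Start one rx worker thread per CPU, capped at 32.  Failure to start the
 * first thread is fatal; otherwise the module continues with however many
 * threads did start.
 */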
978 static int
979 start_worker_threads(void)
980 {
981 	int i, rc;
982 	struct cxgbei_worker_thread_softc *cwt;
983 
984 	worker_thread_count = min(mp_ncpus, 32);
985 	cwt_softc = malloc(worker_thread_count * sizeof(*cwt), M_CXGBE,
986 	    M_WAITOK | M_ZERO);
987 
988 	MPASS(cxgbei_proc == NULL);
989 	for (i = 0, cwt = &cwt_softc[0]; i < worker_thread_count; i++, cwt++) {
990 		mtx_init(&cwt->cwt_lock, "cwt lock", NULL, MTX_DEF);
991 		cv_init(&cwt->cwt_cv, "cwt cv");
992 		TAILQ_INIT(&cwt->rx_head);
993 		rc = kproc_kthread_add(cwt_main, cwt, &cxgbei_proc, NULL, 0, 0,
994 		    "cxgbei", "%d", i);
995 		if (rc != 0) {
996 			printf("cxgbei: failed to start thread #%d/%d (%d)\n",
997 			    i + 1, worker_thread_count, rc);
998 			mtx_destroy(&cwt->cwt_lock);
999 			cv_destroy(&cwt->cwt_cv);
1000 			bzero(cwt, sizeof(*cwt));
1001 			if (i == 0) {
1002 				free(cwt_softc, M_CXGBE);
1003 				worker_thread_count = 0;
1004 
1005 				return (rc);
1006 			}
1007 
1008 			/* Not fatal, carry on with fewer threads. */
1009 			worker_thread_count = i;
1010 			rc = 0;
1011 			break;
1012 		}
1013 
1014 		/* Wait for thread to start before moving on to the next one. */
1015 		mtx_lock(&cwt->cwt_lock);
1016 		while (cwt->cwt_state == 0)
1017 			cv_wait(&cwt->cwt_cv, &cwt->cwt_lock);
1018 		mtx_unlock(&cwt->cwt_lock);
1019 	}
1020 
1021 	MPASS(cwt_softc != NULL);
1022 	MPASS(worker_thread_count > 0);
1023 	return (0);
1024 }
1025 
1026 static void
1027 stop_worker_threads(void)
1028 {
1029 	int i;
1030 	struct cxgbei_worker_thread_softc *cwt = &cwt_softc[0];
1031 
1032 	MPASS(worker_thread_count >= 0);
1033 
1034 	for (i = 0, cwt = &cwt_softc[0]; i < worker_thread_count; i++, cwt++) {
1035 		mtx_lock(&cwt->cwt_lock);
1036 		MPASS(cwt->cwt_state == CWT_RUNNING ||
1037 		    cwt->cwt_state == CWT_SLEEPING);
1038 		cwt->cwt_state = CWT_STOP;
1039 		cv_signal(&cwt->cwt_cv);
1040 		do {
1041 			cv_wait(&cwt->cwt_cv, &cwt->cwt_lock);
1042 		} while (cwt->cwt_state != CWT_STOPPED);
1043 		mtx_unlock(&cwt->cwt_lock);
1044 	}
1045 	free(cwt_softc, M_CXGBE);
1046 }
1047 
1048 /* Select a worker thread for a connection. */
1049 u_int
1050 cxgbei_select_worker_thread(struct icl_cxgbei_conn *icc)
1051 {
1052 	struct adapter *sc = icc->sc;
1053 	struct toepcb *toep = icc->toep;
1054 	u_int i, n;
1055 
1056 	n = worker_thread_count / sc->sge.nofldrxq;
1057 	if (n > 0)
1058 		i = toep->vi->pi->port_id * n + arc4random() % n;
1059 	else
1060 		i = arc4random() % worker_thread_count;
1061 
1062 	CTR3(KTR_CXGBE, "%s: tid %u, cwt %u", __func__, toep->tid, i);
1063 
1064 	return (i);
1065 }
1066 
1067 static int
1068 cxgbei_mod_load(void)
1069 {
1070 	int rc;
1071 
1072 	t4_register_cpl_handler(CPL_ISCSI_HDR, do_rx_iscsi_hdr);
1073 	t4_register_cpl_handler(CPL_ISCSI_DATA, do_rx_iscsi_data);
1074 	t4_register_cpl_handler(CPL_RX_ISCSI_DDP, do_rx_iscsi_ddp);
1075 
1076 	rc = start_worker_threads();
1077 	if (rc != 0)
1078 		return (rc);
1079 
1080 	rc = t4_register_uld(&cxgbei_uld_info);
1081 	if (rc != 0) {
1082 		stop_worker_threads();
1083 		return (rc);
1084 	}
1085 
1086 	t4_iterate(cxgbei_activate_all, NULL);
1087 
1088 	return (rc);
1089 }
1090 
1091 static int
1092 cxgbei_mod_unload(void)
1093 {
1094 
1095 	t4_iterate(cxgbei_deactivate_all, NULL);
1096 
1097 	if (t4_unregister_uld(&cxgbei_uld_info) == EBUSY)
1098 		return (EBUSY);
1099 
1100 	stop_worker_threads();
1101 
1102 	t4_register_cpl_handler(CPL_ISCSI_HDR, NULL);
1103 	t4_register_cpl_handler(CPL_ISCSI_DATA, NULL);
1104 	t4_register_cpl_handler(CPL_RX_ISCSI_DDP, NULL);
1105 
1106 	return (0);
1107 }
1108 #endif
1109 
1110 static int
1111 cxgbei_modevent(module_t mod, int cmd, void *arg)
1112 {
1113 	int rc = 0;
1114 
1115 #ifdef TCP_OFFLOAD
1116 	switch (cmd) {
1117 	case MOD_LOAD:
1118 		rc = cxgbei_mod_load();
1119 		if (rc == 0)
1120 			rc = icl_cxgbei_mod_load();
1121 		break;
1122 
1123 	case MOD_UNLOAD:
1124 		rc = icl_cxgbei_mod_unload();
1125 		if (rc == 0)
1126 			rc = cxgbei_mod_unload();
1127 		break;
1128 
1129 	default:
1130 		rc = EINVAL;
1131 	}
1132 #else
1133 	printf("cxgbei: compiled without TCP_OFFLOAD support.\n");
1134 	rc = EOPNOTSUPP;
1135 #endif
1136 
1137 	return (rc);
1138 }
1139 
1140 static moduledata_t cxgbei_mod = {
1141 	"cxgbei",
1142 	cxgbei_modevent,
1143 	NULL,
1144 };
1145 
1146 MODULE_VERSION(cxgbei, 1);
1147 DECLARE_MODULE(cxgbei, cxgbei_mod, SI_SUB_EXEC, SI_ORDER_ANY);
1148 MODULE_DEPEND(cxgbei, t4_tom, 1, 1, 1);
1149 MODULE_DEPEND(cxgbei, cxgbe, 1, 1, 1);
1150 MODULE_DEPEND(cxgbei, icl, 1, 1, 1);
1151