/*-
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "efx.h"
#include "efx_impl.h"


#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD

#if EFSYS_OPT_QSTATS
#define	EFX_TX_QSTAT_INCR(_etp, _stat)					\
	do {								\
		(_etp)->et_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_TX_QSTAT_INCR(_etp, _stat)
#endif

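/*
 * Build and issue an MC_CMD_INIT_TXQ MCDI request to create the hardware
 * TX queue: size, owning event queue, label, instance and offload flags,
 * plus the DMA addresses of the descriptor ring buffers.
 */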
static	__checkReturn	efx_rc_t
efx_mcdi_init_txq(
	__in		efx_nic_t *enp,
	__in		uint32_t size,
	__in		uint32_t target_evq,
	__in		uint32_t label,
	__in		uint32_t instance,
	__in		uint16_t flags,
	__in		efsys_mem_t *esmp)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
			    MC_CMD_INIT_TXQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
	    EFX_TXQ_NBUFS(EFX_TXQ_MAXNDESCS(&enp->en_nic_cfg)));

	npages = EFX_TXQ_NBUFS(size);
	if (npages > MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_TXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);

	MCDI_IN_POPULATE_DWORD_7(req, INIT_TXQ_IN_FLAGS,
	    INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
	    INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
	    (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
	    INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
	    INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
	    INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
	    INIT_TXQ_IN_CRC_MODE, 0,
	    INIT_TXQ_IN_FLAG_TIMESTAMP, 0);

	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

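	/*
	 * Fill in the DMA_ADDR array: one entry per EFX_BUF_SIZE buffer of
	 * the descriptor ring, each split into low/high 32-bit halves.
	 */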
	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

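/*
 * Issue an MC_CMD_FINI_TXQ MCDI request to tear down the hardware TX queue
 * identified by its instance number.
 */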
static	__checkReturn	efx_rc_t
efx_mcdi_fini_txq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
			    MC_CMD_FINI_TXQ_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_FINI_TXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);

	efx_mcdi_execute_quiet(enp, &req);

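	/*
	 * MC_CMD_ERR_EALREADY is not treated as an error: it indicates the
	 * queue has already been destroyed (for example, after an MC reboot).
	 */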
	if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_tx_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_tx_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
ef10_tx_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		unsigned int label,
	__in		efsys_mem_t *esmp,
	__in		size_t n,
	__in		uint32_t id,
	__in		uint16_t flags,
	__in		efx_evq_t *eep,
	__in		efx_txq_t *etp,
	__out		unsigned int *addedp)
{
	efx_qword_t desc;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))

	if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags,
	    esmp)) != 0)
		goto fail1;

	/*
	 * A previous user of this TX queue may have written a descriptor to the
	 * TX push collector, but not pushed the doorbell (e.g. after a crash).
	 * The next doorbell write would then push the stale descriptor.
	 *
	 * Ensure the (per network port) TX push collector is cleared by writing
	 * a no-op TX option descriptor. See bug29981 for details.
	 */
	*addedp = 1;
	EFX_POPULATE_QWORD_4(desc,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
	    ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
	    ESF_DZ_TX_OPTION_IP_CSUM,
	    (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0);

	EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);
	ef10_tx_qpush(etp, *addedp, 0);

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

		void
ef10_tx_qdestroy(
	__in	efx_txq_t *etp)
{
	/* FIXME */
	_NOTE(ARGUNUSED(etp))
	/* FIXME */
}

	__checkReturn	efx_rc_t
ef10_tx_qpio_enable(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_piobuf_handle_t handle;
	efx_rc_t rc;

	if (etp->et_pio_size != 0) {
		rc = EALREADY;
		goto fail1;
	}

	/* Sub-allocate a PIO block from a piobuf */
	if ((rc = ef10_nic_pio_alloc(enp,
		    &etp->et_pio_bufnum,
		    &handle,
		    &etp->et_pio_blknum,
		    &etp->et_pio_offset,
		    &etp->et_pio_size)) != 0) {
		goto fail2;
	}
	EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);

	/* Link the piobuf to this TXQ */
	if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
		goto fail3;
	}

	/*
	 * et_pio_offset is the offset of the sub-allocated block within the
	 * hardware PIO buffer. It is used as the buffer address in the PIO
	 * option descriptor.
	 *
	 * et_pio_write_offset is the offset of the sub-allocated block from the
	 * start of the write-combined memory mapping, and is used for writing
	 * data into the PIO buffer.
	 */
	etp->et_pio_write_offset =
	    (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
	    ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;

	return (0);

fail3:
	EFSYS_PROBE(fail3);
	ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
	etp->et_pio_size = 0;
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_tx_qpio_disable(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;

	if (etp->et_pio_size != 0) {
		/* Unlink the piobuf from this TXQ */
		ef10_nic_pio_unlink(enp, etp->et_index);

		/* Free the sub-allocated PIO block */
		ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
		etp->et_pio_size = 0;
		etp->et_pio_write_offset = 0;
	}
}

	__checkReturn	efx_rc_t
ef10_tx_qpio_write(
	__in			efx_txq_t *etp,
	__in_ecount(length)	uint8_t *buffer,
	__in			size_t length,
	__in			size_t offset)
{
	efx_nic_t *enp = etp->et_enp;
	efsys_bar_t *esbp = enp->en_esbp;
	uint32_t write_offset;
	uint32_t write_offset_limit;
	efx_qword_t *eqp;
	efx_rc_t rc;

	EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);

	if (etp->et_pio_size == 0) {
		rc = ENOENT;
		goto fail1;
	}
	if (offset + length > etp->et_pio_size)	{
		rc = ENOSPC;
		goto fail2;
	}

	/*
	 * Writes to PIO buffers must be 64 bit aligned, and multiples of
	 * 64 bits.
	 */
	write_offset = etp->et_pio_write_offset + offset;
	write_offset_limit = write_offset + length;
	eqp = (efx_qword_t *)buffer;
	while (write_offset < write_offset_limit) {
		EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
		eqp++;
		write_offset += sizeof (efx_qword_t);
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_tx_qpio_post(
	__in			efx_txq_t *etp,
	__in			size_t pkt_length,
	__in			unsigned int completed,
	__inout			unsigned int *addedp)
{
	efx_qword_t pio_desc;
	unsigned int id;
	size_t offset;
	unsigned int added = *addedp;
	efx_rc_t rc;


	if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	if (etp->et_pio_size == 0) {
		rc = ENOENT;
		goto fail2;
	}

	id = added++ & etp->et_mask;
	offset = id * sizeof (efx_qword_t);

	EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
		    unsigned int, id, uint32_t, etp->et_pio_offset,
		    size_t, pkt_length);

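	/*
	 * Build a PIO option descriptor (option type 1) that tells the NIC to
	 * transmit pkt_length bytes starting at et_pio_offset within the PIO
	 * buffer linked to this TXQ.
	 */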
	EFX_POPULATE_QWORD_5(pio_desc,
			ESF_DZ_TX_DESC_IS_OPT, 1,
			ESF_DZ_TX_OPTION_TYPE, 1,
			ESF_DZ_TX_PIO_CONT, 0,
			ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
			ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);

	EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);

	EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);

	*addedp = added;
	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_tx_qpost(
	__in		efx_txq_t *etp,
	__in_ecount(n)	efx_buffer_t *eb,
	__in		unsigned int n,
	__in		unsigned int completed,
	__inout		unsigned int *addedp)
{
	unsigned int added = *addedp;
	unsigned int i;
	efx_rc_t rc;

	if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	for (i = 0; i < n; i++) {
		efx_buffer_t *ebp = &eb[i];
		efsys_dma_addr_t addr = ebp->eb_addr;
		size_t size = ebp->eb_size;
		boolean_t eop = ebp->eb_eop;
		unsigned int id;
		size_t offset;
		efx_qword_t qword;

		/* Fragments must not span 4k boundaries. */
		EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= (addr + size));

		id = added++ & etp->et_mask;
		offset = id * sizeof (efx_qword_t);

		EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
		    unsigned int, id, efsys_dma_addr_t, addr,
		    size_t, size, boolean_t, eop);

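		/*
		 * Build a DMA descriptor for this fragment. The continuation
		 * bit is clear only on the final fragment of a packet (eop).
		 */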
		EFX_POPULATE_QWORD_5(qword,
		    ESF_DZ_TX_KER_TYPE, 0,
		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));

		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
	}

	EFX_TX_QSTAT_INCR(etp, TX_POST);

	*addedp = added;
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

/*
 * This improves performance by pushing a TX descriptor at the same time as the
 * doorbell. The descriptor must also be added to the TXQ, so that it can be
 * used if the hardware decides not to use the pushed descriptor.
 */
			void
ef10_tx_qpush(
	__in		efx_txq_t *etp,
	__in		unsigned int added,
	__in		unsigned int pushed)
{
	efx_nic_t *enp = etp->et_enp;
	unsigned int wptr;
	unsigned int id;
	size_t offset;
	efx_qword_t desc;
	efx_oword_t oword;

	wptr = added & etp->et_mask;
	id = pushed & etp->et_mask;
	offset = id * sizeof (efx_qword_t);

	EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);
	EFX_POPULATE_OWORD_3(oword,
	    ERF_DZ_TX_DESC_WPTR, wptr,
	    ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
	    ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));

	/* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
	EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id);
	EFSYS_PIO_WRITE_BARRIER();
	EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index,
				    &oword);
}
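
/*
 * Illustrative caller sketch (not part of this driver): descriptors are
 * queued with ef10_tx_qpost() and the doorbell is then written once with
 * ef10_tx_qpush(). The variable names below are hypothetical; real callers
 * use the generic efx_tx_qpost()/efx_tx_qpush() wrappers, which dispatch to
 * these functions on EF10 NICs.
 *
 *	unsigned int added = txq_added;		(caller-tracked added count)
 *	unsigned int pushed = added;		(position before this batch)
 *
 *	if (ef10_tx_qpost(etp, eb, n, completed, &added) == 0)
 *		ef10_tx_qpush(etp, added, pushed);
 */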

	__checkReturn	efx_rc_t
ef10_tx_qdesc_post(
	__in		efx_txq_t *etp,
	__in_ecount(n)	efx_desc_t *ed,
	__in		unsigned int n,
	__in		unsigned int completed,
	__inout		unsigned int *addedp)
{
	unsigned int added = *addedp;
	unsigned int i;
	efx_rc_t rc;

	if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	for (i = 0; i < n; i++) {
		efx_desc_t *edp = &ed[i];
		unsigned int id;
		size_t offset;

		id = added++ & etp->et_mask;
		offset = id * sizeof (efx_desc_t);

		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
	}

	EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
		    unsigned int, added, unsigned int, n);

	EFX_TX_QSTAT_INCR(etp, TX_POST);

	*addedp = added;
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

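/*
 * The ef10_tx_qdesc_*_create() helpers below only build descriptors in the
 * caller-supplied efx_desc_t; the descriptors are written to the ring later
 * by ef10_tx_qdesc_post().
 */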
	void
ef10_tx_qdesc_dma_create(
	__in	efx_txq_t *etp,
	__in	efsys_dma_addr_t addr,
	__in	size_t size,
	__in	boolean_t eop,
	__out	efx_desc_t *edp)
{
	/* Fragments must not span 4k boundaries. */
	EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= addr + size);

	EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
		    efsys_dma_addr_t, addr,
		    size_t, size, boolean_t, eop);

	EFX_POPULATE_QWORD_5(edp->ed_eq,
		    ESF_DZ_TX_KER_TYPE, 0,
		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
}

	void
ef10_tx_qdesc_tso_create(
	__in	efx_txq_t *etp,
	__in	uint16_t ipv4_id,
	__in	uint32_t tcp_seq,
	__in	uint8_t  tcp_flags,
	__out	efx_desc_t *edp)
{
	EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
		    uint16_t, ipv4_id, uint32_t, tcp_seq,
		    uint8_t, tcp_flags);

	EFX_POPULATE_QWORD_5(edp->ed_eq,
			    ESF_DZ_TX_DESC_IS_OPT, 1,
			    ESF_DZ_TX_OPTION_TYPE,
			    ESE_DZ_TX_OPTION_DESC_TSO,
			    ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
			    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
			    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
}

	void
ef10_tx_qdesc_tso2_create(
	__in			efx_txq_t *etp,
	__in			uint16_t ipv4_id,
	__in			uint32_t tcp_seq,
	__in			uint16_t tcp_mss,
	__out_ecount(count)	efx_desc_t *edp,
	__in			int count)
{
	EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
		    uint16_t, ipv4_id, uint32_t, tcp_seq,
		    uint16_t, tcp_mss);

	EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);

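	/*
	 * FATSOv2 uses two TSO option descriptors: FATSO2A carries the IP ID
	 * and TCP sequence number, FATSO2B carries the TCP MSS.
	 */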
	EFX_POPULATE_QWORD_5(edp[0].ed_eq,
			    ESF_DZ_TX_DESC_IS_OPT, 1,
			    ESF_DZ_TX_OPTION_TYPE,
			    ESE_DZ_TX_OPTION_DESC_TSO,
			    ESF_DZ_TX_TSO_OPTION_TYPE,
			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
			    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
			    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
	EFX_POPULATE_QWORD_4(edp[1].ed_eq,
			    ESF_DZ_TX_DESC_IS_OPT, 1,
			    ESF_DZ_TX_OPTION_TYPE,
			    ESE_DZ_TX_OPTION_DESC_TSO,
			    ESF_DZ_TX_TSO_OPTION_TYPE,
			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
			    ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
}

	void
ef10_tx_qdesc_vlantci_create(
	__in	efx_txq_t *etp,
	__in	uint16_t  tci,
	__out	efx_desc_t *edp)
{
	EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
		    uint16_t, tci);

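	/*
	 * Request VLAN tag insertion only for a non-zero TCI; a VLAN_OP of
	 * zero is a no-op. TAG1 carries the TCI to insert.
	 */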
	EFX_POPULATE_QWORD_4(edp->ed_eq,
			    ESF_DZ_TX_DESC_IS_OPT, 1,
			    ESF_DZ_TX_OPTION_TYPE,
			    ESE_DZ_TX_OPTION_DESC_VLAN,
			    ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
			    ESF_DZ_TX_VLAN_TAG1, tci);
}


	__checkReturn	efx_rc_t
ef10_tx_qpace(
	__in		efx_txq_t *etp,
	__in		unsigned int ns)
{
	efx_rc_t rc;

	/* FIXME */
	_NOTE(ARGUNUSED(etp, ns))
	_NOTE(CONSTANTCONDITION)
	if (B_FALSE) {
		rc = ENOTSUP;
		goto fail1;
	}
	/* FIXME */

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_tx_qflush(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_rc_t rc;

	if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
		goto fail1;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_tx_qenable(
	__in		efx_txq_t *etp)
{
	/* FIXME */
	_NOTE(ARGUNUSED(etp))
	/* FIXME */
}

#if EFSYS_OPT_QSTATS
			void
ef10_tx_qstats_update(
	__in				efx_txq_t *etp,
	__inout_ecount(TX_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

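	/* Accumulate the queue's counts into *stat and reset them. */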
	for (id = 0; id < TX_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, etp->et_stat[id]);
		etp->et_stat[id] = 0;
	}
}

#endif /* EFSYS_OPT_QSTATS */

#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */