xref: /illumos-gate/usr/src/uts/common/io/sfxge/common/ef10_tx.c (revision b30d193948be5a7794d7ae3ba0ed9c2f72c88e0f)
/*
 * Copyright (c) 2012-2015 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include "efx.h"
#include "efx_impl.h"


#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD

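/*
 * Per-queue software statistics are only maintained when EFSYS_OPT_QSTATS
 * is enabled.  The do { ... } while (B_FALSE) wrapper makes the macro expand
 * to a single statement that is safe in if/else bodies; the _NOTE() entry
 * suppresses the lint warning about the constant loop condition.
 */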
#if EFSYS_OPT_QSTATS
#define EFX_TX_QSTAT_INCR(_etp, _stat)                                  \
        do {                                                            \
                (_etp)->et_stat[_stat]++;                               \
        _NOTE(CONSTANTCONDITION)                                        \
        } while (B_FALSE)
#else
#define EFX_TX_QSTAT_INCR(_etp, _stat)
#endif

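/*
 * Ask the firmware to create a TX queue: the MC_CMD_INIT_TXQ request carries
 * the queue size, the event queue to attach to, the checksum offload flags
 * and the DMA addresses of the buffers backing the descriptor ring.
 */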
static  __checkReturn   efx_rc_t
efx_mcdi_init_txq(
        __in            efx_nic_t *enp,
        __in            uint32_t size,
        __in            uint32_t target_evq,
        __in            uint32_t label,
        __in            uint32_t instance,
        __in            uint16_t flags,
        __in            efsys_mem_t *esmp)
{
        efx_mcdi_req_t req;
        uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
                            MC_CMD_INIT_TXQ_OUT_LEN)];
        efx_qword_t *dma_addr;
        uint64_t addr;
        int npages;
        int i;
        efx_rc_t rc;

        EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
            EFX_TXQ_NBUFS(EFX_TXQ_MAXNDESCS(&enp->en_nic_cfg)));

        npages = EFX_TXQ_NBUFS(size);
        if (npages > MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM) {
                rc = EINVAL;
                goto fail1;
        }

        (void) memset(payload, 0, sizeof (payload));
        req.emr_cmd = MC_CMD_INIT_TXQ;
        req.emr_in_buf = payload;
        req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
        req.emr_out_buf = payload;
        req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;

        MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size);
        MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
        MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
        MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);

        MCDI_IN_POPULATE_DWORD_7(req, INIT_TXQ_IN_FLAGS,
            INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
            INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
            (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
            INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
            (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
            INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
            INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
            INIT_TXQ_IN_CRC_MODE, 0,
            INIT_TXQ_IN_FLAG_TIMESTAMP, 0);

        MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
        MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

        dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
        addr = EFSYS_MEM_ADDR(esmp);

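        /*
         * Pass the DMA address of each EFX_BUF_SIZE page backing the
         * descriptor ring, split into low and high dwords.
         */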
        for (i = 0; i < npages; i++) {
                EFX_POPULATE_QWORD_2(*dma_addr,
                    EFX_DWORD_1, (uint32_t)(addr >> 32),
                    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

                dma_addr++;
                addr += EFX_BUF_SIZE;
        }

        efx_mcdi_execute(enp, &req);

        if (req.emr_rc != 0) {
                rc = req.emr_rc;
                goto fail2;
        }

        return (0);

fail2:
        EFSYS_PROBE(fail2);
fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

static  __checkReturn   efx_rc_t
efx_mcdi_fini_txq(
        __in            efx_nic_t *enp,
        __in            uint32_t instance)
{
        efx_mcdi_req_t req;
        uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
                            MC_CMD_FINI_TXQ_OUT_LEN)];
        efx_rc_t rc;

        (void) memset(payload, 0, sizeof (payload));
        req.emr_cmd = MC_CMD_FINI_TXQ;
        req.emr_in_buf = payload;
        req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
        req.emr_out_buf = payload;
        req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;

        MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);

        efx_mcdi_execute(enp, &req);

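        /*
         * MC_CMD_ERR_EALREADY is tolerated here: the firmware reports it
         * when the queue no longer exists (e.g. it was already torn down
         * following an MC reboot).
         */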
        if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
                rc = req.emr_rc;
                goto fail1;
        }

        return (0);

fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

        __checkReturn   efx_rc_t
ef10_tx_init(
        __in            efx_nic_t *enp)
{
        _NOTE(ARGUNUSED(enp))
        return (0);
}

                        void
ef10_tx_fini(
        __in            efx_nic_t *enp)
{
        _NOTE(ARGUNUSED(enp))
}

        __checkReturn   efx_rc_t
ef10_tx_qcreate(
        __in            efx_nic_t *enp,
        __in            unsigned int index,
        __in            unsigned int label,
        __in            efsys_mem_t *esmp,
        __in            size_t n,
        __in            uint32_t id,
        __in            uint16_t flags,
        __in            efx_evq_t *eep,
        __in            efx_txq_t *etp,
        __out           unsigned int *addedp)
{
        efx_qword_t desc;
        efx_rc_t rc;

        _NOTE(ARGUNUSED(id));

        if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags,
            esmp)) != 0)
                goto fail1;

        /*
         * A previous user of this TX queue may have written a descriptor to
         * the TX push collector, but not pushed the doorbell (e.g. after a
         * crash). The next doorbell write would then push the stale
         * descriptor.
         *
         * Ensure the (per network port) TX push collector is cleared by
         * writing a no-op TX option descriptor. See bug29981 for details.
         */
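        /*
         * The no-op is a CRC_CSUM option descriptor; its checksum controls
         * mirror the queue creation flags.
         */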
        *addedp = 1;
        EFX_POPULATE_QWORD_4(desc,
            ESF_DZ_TX_DESC_IS_OPT, 1,
            ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
            ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
            (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
            ESF_DZ_TX_OPTION_IP_CSUM,
            (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0);

        EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);
        ef10_tx_qpush(etp, *addedp, 0);

        return (0);

fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

                void
ef10_tx_qdestroy(
        __in    efx_txq_t *etp)
{
        /* FIXME */
        _NOTE(ARGUNUSED(etp))
        /* FIXME */
}

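/*
 * PIO (programmed I/O) transmission: the driver writes packet data directly
 * into a PIO buffer on the adapter through the write-combined BAR mapping,
 * then posts an option descriptor referencing that buffer (see
 * ef10_tx_qpio_post()).  Avoiding the DMA fetch reduces latency for small
 * packets.
 */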
        __checkReturn   efx_rc_t
ef10_tx_qpio_enable(
        __in            efx_txq_t *etp)
{
        efx_nic_t *enp = etp->et_enp;
        efx_piobuf_handle_t handle;
        efx_rc_t rc;

        if (etp->et_pio_size != 0) {
                rc = EALREADY;
                goto fail1;
        }

        /* Sub-allocate a PIO block from a piobuf */
        if ((rc = ef10_nic_pio_alloc(enp,
                    &etp->et_pio_bufnum,
                    &handle,
                    &etp->et_pio_blknum,
                    &etp->et_pio_offset,
                    &etp->et_pio_size)) != 0) {
                goto fail2;
        }
        EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);

        /* Link the piobuf to this TXQ */
        if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
                goto fail3;
        }

        /*
         * et_pio_offset is the offset of the sub-allocated block within the
         * hardware PIO buffer. It is used as the buffer address in the PIO
         * option descriptor.
         *
         * et_pio_write_offset is the offset of the sub-allocated block from
         * the start of the write-combined memory mapping, and is used for
         * writing data into the PIO buffer.
         */
        etp->et_pio_write_offset =
            (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
            ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;

        return (0);

fail3:
        EFSYS_PROBE(fail3);
        (void) ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
        etp->et_pio_size = 0;
fail2:
        EFSYS_PROBE(fail2);
fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

                        void
ef10_tx_qpio_disable(
        __in            efx_txq_t *etp)
{
        efx_nic_t *enp = etp->et_enp;

        if (etp->et_pio_size != 0) {
                /* Unlink the piobuf from this TXQ */
                (void) ef10_nic_pio_unlink(enp, etp->et_index);

                /* Free the sub-allocated PIO block */
                (void) ef10_nic_pio_free(enp, etp->et_pio_bufnum,
                    etp->et_pio_blknum);
                etp->et_pio_size = 0;
                etp->et_pio_write_offset = 0;
        }
}

        __checkReturn   efx_rc_t
ef10_tx_qpio_write(
        __in                    efx_txq_t *etp,
        __in_ecount(length)     uint8_t *buffer,
        __in                    size_t length,
        __in                    size_t offset)
{
        efx_nic_t *enp = etp->et_enp;
        efsys_bar_t *esbp = enp->en_esbp;
        uint32_t write_offset;
        uint32_t write_offset_limit;
        efx_qword_t *eqp;
        efx_rc_t rc;

        EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);

        if (etp->et_pio_size == 0) {
                rc = ENOENT;
                goto fail1;
        }
        if (offset + length > etp->et_pio_size) {
                rc = ENOSPC;
                goto fail2;
        }

        /*
         * Writes to PIO buffers must be 64 bit aligned, and multiples of
         * 64 bits.
         */
        write_offset = etp->et_pio_write_offset + offset;
        write_offset_limit = write_offset + length;
        eqp = (efx_qword_t *)buffer;
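        /* Copy the data through the write-combined mapping a qword at a time. */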
        while (write_offset < write_offset_limit) {
                EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
                eqp++;
                write_offset += sizeof (efx_qword_t);
        }

        return (0);

fail2:
        EFSYS_PROBE(fail2);
fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

        __checkReturn   efx_rc_t
ef10_tx_qpio_post(
        __in                    efx_txq_t *etp,
        __in                    size_t pkt_length,
        __in                    unsigned int completed,
        __inout                 unsigned int *addedp)
{
        efx_qword_t pio_desc;
        unsigned int id;
        size_t offset;
        unsigned int added = *addedp;
        efx_rc_t rc;

        if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
                rc = ENOSPC;
                goto fail1;
        }

        if (etp->et_pio_size == 0) {
                rc = ENOENT;
                goto fail2;
        }

        id = added++ & etp->et_mask;
        offset = id * sizeof (efx_qword_t);

        EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
            unsigned int, id, uint32_t, etp->et_pio_offset,
            size_t, pkt_length);

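        /*
         * Post a PIO option descriptor telling the adapter to transmit
         * pkt_length bytes from this queue's PIO buffer block.
         */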
        EFX_POPULATE_QWORD_5(pio_desc,
            ESF_DZ_TX_DESC_IS_OPT, 1,
            ESF_DZ_TX_OPTION_TYPE, 1,
            ESF_DZ_TX_PIO_CONT, 0,
            ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
            ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);

        EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);

        EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);

        *addedp = added;
        return (0);

fail2:
        EFSYS_PROBE(fail2);
fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

        __checkReturn   efx_rc_t
ef10_tx_qpost(
        __in            efx_txq_t *etp,
        __in_ecount(n)  efx_buffer_t *eb,
        __in            unsigned int n,
        __in            unsigned int completed,
        __inout         unsigned int *addedp)
{
        unsigned int added = *addedp;
        unsigned int i;
        efx_rc_t rc;

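        /*
         * added and completed are free-running counters, so their unsigned
         * difference is the number of descriptors currently in flight.
         * Refuse the post if it would exceed the ring limit.
         */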
        if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
                rc = ENOSPC;
                goto fail1;
        }

        for (i = 0; i < n; i++) {
                efx_buffer_t *ebp = &eb[i];
                efsys_dma_addr_t addr = ebp->eb_addr;
                size_t size = ebp->eb_size;
                boolean_t eop = ebp->eb_eop;
                unsigned int id;
                size_t offset;
                efx_qword_t qword;

                /* Fragments must not span 4k boundaries. */
                EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= (addr + size));

                id = added++ & etp->et_mask;
                offset = id * sizeof (efx_qword_t);

                EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
                    unsigned int, id, efsys_dma_addr_t, addr,
                    size_t, size, boolean_t, eop);

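                /*
                 * Build a DMA descriptor: the continuation bit is clear on
                 * the final fragment of a packet (eop), and the 64-bit
                 * buffer address is split into two dwords.
                 */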
                EFX_POPULATE_QWORD_5(qword,
                    ESF_DZ_TX_KER_TYPE, 0,
                    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
                    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
                    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
                    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));

                EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
        }

        EFX_TX_QSTAT_INCR(etp, TX_POST);

        *addedp = added;
        return (0);

fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

/*
 * This improves performance by pushing a TX descriptor at the same time as
 * the doorbell. The descriptor must also be added to the TXQ, so that it can
 * be used if the hardware decides not to use the pushed descriptor.
 */
                        void
ef10_tx_qpush(
        __in            efx_txq_t *etp,
        __in            unsigned int added,
        __in            unsigned int pushed)
{
        efx_nic_t *enp = etp->et_enp;
        unsigned int wptr;
        unsigned int id;
        size_t offset;
        efx_qword_t desc;
        efx_oword_t oword;

        wptr = added & etp->et_mask;
        id = pushed & etp->et_mask;
        offset = id * sizeof (efx_qword_t);

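        /*
         * Read back the descriptor at 'pushed' so that a copy of it can be
         * included in the doorbell write along with the new write pointer.
         */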
        EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);
        EFX_POPULATE_OWORD_3(oword,
            ERF_DZ_TX_DESC_WPTR, wptr,
            ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
            ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));

        /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
        EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id);
        EFSYS_PIO_WRITE_BARRIER();
        EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index,
            &oword);
}

        __checkReturn   efx_rc_t
ef10_tx_qdesc_post(
        __in            efx_txq_t *etp,
        __in_ecount(n)  efx_desc_t *ed,
        __in            unsigned int n,
        __in            unsigned int completed,
        __inout         unsigned int *addedp)
{
        unsigned int added = *addedp;
        unsigned int i;
        efx_rc_t rc;

        if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
                rc = ENOSPC;
                goto fail1;
        }

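        /*
         * Copy the pre-built descriptors into the ring; each efx_desc_t
         * holds a single qword descriptor.
         */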
        for (i = 0; i < n; i++) {
                efx_desc_t *edp = &ed[i];
                unsigned int id;
                size_t offset;

                id = added++ & etp->et_mask;
                offset = id * sizeof (efx_desc_t);

                EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
        }

        EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
            unsigned int, added, unsigned int, n);

        EFX_TX_QSTAT_INCR(etp, TX_POST);

        *addedp = added;
        return (0);

fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

        void
ef10_tx_qdesc_dma_create(
        __in    efx_txq_t *etp,
        __in    efsys_dma_addr_t addr,
        __in    size_t size,
        __in    boolean_t eop,
        __out   efx_desc_t *edp)
{
        /* Fragments must not span 4k boundaries. */
        EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= addr + size);

        EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
            efsys_dma_addr_t, addr,
            size_t, size, boolean_t, eop);

        EFX_POPULATE_QWORD_5(edp->ed_eq,
            ESF_DZ_TX_KER_TYPE, 0,
            ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
            ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
            ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
            ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
}

        void
ef10_tx_qdesc_tso_create(
        __in    efx_txq_t *etp,
        __in    uint16_t ipv4_id,
        __in    uint32_t tcp_seq,
        __in    uint8_t  tcp_flags,
        __out   efx_desc_t *edp)
{
        EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
            uint16_t, ipv4_id, uint32_t, tcp_seq,
            uint8_t, tcp_flags);

        EFX_POPULATE_QWORD_5(edp->ed_eq,
            ESF_DZ_TX_DESC_IS_OPT, 1,
            ESF_DZ_TX_OPTION_TYPE,
            ESE_DZ_TX_OPTION_DESC_TSO,
            ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
            ESF_DZ_TX_TSO_IP_ID, ipv4_id,
            ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
}

        void
ef10_tx_qdesc_tso2_create(
        __in                    efx_txq_t *etp,
        __in                    uint16_t ipv4_id,
        __in                    uint32_t tcp_seq,
        __in                    uint16_t tcp_mss,
        __out_ecount(count)     efx_desc_t *edp,
        __in                    int count)
{
        EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
            uint16_t, ipv4_id, uint32_t, tcp_seq,
            uint16_t, tcp_mss);

        EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);

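        /*
         * FATSOv2 uses two option descriptors: type A carries the IP ID and
         * TCP sequence number, type B carries the TCP MSS.
         */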
        EFX_POPULATE_QWORD_5(edp[0].ed_eq,
            ESF_DZ_TX_DESC_IS_OPT, 1,
            ESF_DZ_TX_OPTION_TYPE,
            ESE_DZ_TX_OPTION_DESC_TSO,
            ESF_DZ_TX_TSO_OPTION_TYPE,
            ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
            ESF_DZ_TX_TSO_IP_ID, ipv4_id,
            ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
        EFX_POPULATE_QWORD_4(edp[1].ed_eq,
            ESF_DZ_TX_DESC_IS_OPT, 1,
            ESF_DZ_TX_OPTION_TYPE,
            ESE_DZ_TX_OPTION_DESC_TSO,
            ESF_DZ_TX_TSO_OPTION_TYPE,
            ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
            ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
}

        void
ef10_tx_qdesc_vlantci_create(
        __in    efx_txq_t *etp,
        __in    uint16_t  tci,
        __out   efx_desc_t *edp)
{
        EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
            uint16_t, tci);

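        /*
         * A TCI of zero requests no VLAN tag insertion; a non-zero TCI is
         * inserted as TAG1.
         */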
        EFX_POPULATE_QWORD_4(edp->ed_eq,
            ESF_DZ_TX_DESC_IS_OPT, 1,
            ESF_DZ_TX_OPTION_TYPE,
            ESE_DZ_TX_OPTION_DESC_VLAN,
            ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
            ESF_DZ_TX_VLAN_TAG1, tci);
}

        __checkReturn   efx_rc_t
ef10_tx_qpace(
        __in            efx_txq_t *etp,
        __in            unsigned int ns)
{
        efx_rc_t rc;

        /* FIXME */
        _NOTE(ARGUNUSED(etp, ns))
        _NOTE(CONSTANTCONDITION)
        if (B_FALSE) {
                rc = ENOTSUP;
                goto fail1;
        }
        /* FIXME */

        return (0);

fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

        __checkReturn   efx_rc_t
ef10_tx_qflush(
        __in            efx_txq_t *etp)
{
        efx_nic_t *enp = etp->et_enp;
        efx_rc_t rc;

        if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
                goto fail1;

        return (0);

fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

                        void
ef10_tx_qenable(
        __in            efx_txq_t *etp)
{
        /* FIXME */
        _NOTE(ARGUNUSED(etp))
        /* FIXME */
}

#if EFSYS_OPT_QSTATS
                        void
ef10_tx_qstats_update(
        __in                            efx_txq_t *etp,
        __inout_ecount(TX_NQSTATS)      efsys_stat_t *stat)
{
        unsigned int id;

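        /*
         * Fold the queue's software statistics into the caller's array and
         * reset the per-queue counts.
         */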
        for (id = 0; id < TX_NQSTATS; id++) {
                efsys_stat_t *essp = &stat[id];

                EFSYS_STAT_INCR(essp, etp->et_stat[id]);
                etp->et_stat[id] = 0;
        }
}

#endif /* EFSYS_OPT_QSTATS */

#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */