/*-
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "efx.h"
#include "efx_impl.h"


#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2

#if EFSYS_OPT_QSTATS
#define	EFX_TX_QSTAT_INCR(_etp, _stat)					\
	do {								\
		(_etp)->et_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_TX_QSTAT_INCR(_etp, _stat)
#endif

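/*
 * Issue an MC_CMD_INIT_TXQ MCDI request to create a hardware TX queue.
 * The queue's descriptor ring memory (esmp) is passed to the firmware as a
 * list of DMA buffer addresses (one per EFX_BUF_SIZE page), and checksum
 * offload and TSO behaviour is selected via the EFX_TXQ_* flags.
 */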
static	__checkReturn	efx_rc_t
efx_mcdi_init_txq(
	__in		efx_nic_t *enp,
	__in		uint32_t ndescs,
	__in		uint32_t target_evq,
	__in		uint32_t label,
	__in		uint32_t instance,
	__in		uint16_t flags,
	__in		efsys_mem_t *esmp)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
			    MC_CMD_INIT_TXQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
	    EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));

	if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_TXQ_SIZE(ndescs))) {
		rc = EINVAL;
		goto fail1;
	}

	npages = EFX_TXQ_NBUFS(ndescs);
	if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) {
		rc = EINVAL;
		goto fail2;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_TXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);

	MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS,
	    INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
	    INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
	    (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
	    INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
	    INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN,
	    (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0,
	    INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN,
	    (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
	    INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
	    INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
	    INIT_TXQ_IN_CRC_MODE, 0,
	    INIT_TXQ_IN_FLAG_TIMESTAMP, 0);

	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail3;
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

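/*
 * Issue an MC_CMD_FINI_TXQ MCDI request to tear down a hardware TX queue.
 * The request is executed quietly because EALREADY (MC rebooted, queue
 * already destroyed) is an expected outcome that callers handle themselves.
 */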
static	__checkReturn	efx_rc_t
efx_mcdi_fini_txq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
			    MC_CMD_FINI_TXQ_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_FINI_TXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	/*
	 * EALREADY is not an error, but indicates that the MC has rebooted and
	 * that the TXQ has already been destroyed.
	 */
	if (rc != EALREADY)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_tx_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_tx_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

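/*
 * Create a TX queue: check that inner (tunnel) checksum offload is only
 * requested when the NIC supports tunnel encapsulations, initialise the
 * queue via MCDI, and then write an initial option descriptor to clear any
 * stale state in the TX push collector (see the comment below).
 */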
	__checkReturn	efx_rc_t
ef10_tx_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		unsigned int label,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs,
	__in		uint32_t id,
	__in		uint16_t flags,
	__in		efx_evq_t *eep,
	__in		efx_txq_t *etp,
	__out		unsigned int *addedp)
{
	efx_nic_cfg_t *encp = &enp->en_nic_cfg;
	uint16_t inner_csum;
	efx_desc_t desc;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))

	inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP;
	if (((flags & inner_csum) != 0) &&
	    (encp->enc_tunnel_encapsulations_supported == 0)) {
		rc = EINVAL;
		goto fail1;
	}

	if ((rc = efx_mcdi_init_txq(enp, ndescs, eep->ee_index, label, index,
	    flags, esmp)) != 0)
		goto fail2;

	/*
	 * A previous user of this TX queue may have written a descriptor to the
	 * TX push collector, but not pushed the doorbell (e.g. after a crash).
	 * The next doorbell write would then push the stale descriptor.
	 *
	 * Ensure the (per network port) TX push collector is cleared by writing
	 * a no-op TX option descriptor. See bug29981 for details.
	 */
	*addedp = 1;
	ef10_tx_qdesc_checksum_create(etp, flags, &desc);

	EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc.ed_eq);
	ef10_tx_qpush(etp, *addedp, 0);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

		void
ef10_tx_qdestroy(
	__in	efx_txq_t *etp)
{
	/* FIXME */
	_NOTE(ARGUNUSED(etp))
	/* FIXME */
}

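/*
 * Enable PIO (programmed I/O) transmission on this TX queue by
 * sub-allocating a block from a hardware PIO buffer and linking that
 * buffer to the queue. On success, et_pio_write_offset addresses the
 * block within the write-combined memory mapping.
 */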
	__checkReturn	efx_rc_t
ef10_tx_qpio_enable(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_piobuf_handle_t handle;
	efx_rc_t rc;

	if (etp->et_pio_size != 0) {
		rc = EALREADY;
		goto fail1;
	}

	/* Sub-allocate a PIO block from a piobuf */
	if ((rc = ef10_nic_pio_alloc(enp,
		    &etp->et_pio_bufnum,
		    &handle,
		    &etp->et_pio_blknum,
		    &etp->et_pio_offset,
		    &etp->et_pio_size)) != 0) {
		goto fail2;
	}
	EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);

	/* Link the piobuf to this TXQ */
	if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
		goto fail3;
	}

	/*
	 * et_pio_offset is the offset of the sub-allocated block within the
	 * hardware PIO buffer. It is used as the buffer address in the PIO
	 * option descriptor.
	 *
	 * et_pio_write_offset is the offset of the sub-allocated block from the
	 * start of the write-combined memory mapping, and is used for writing
	 * data into the PIO buffer.
	 */
	etp->et_pio_write_offset =
	    (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
	    ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;

	return (0);

fail3:
	EFSYS_PROBE(fail3);
	ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
fail2:
	EFSYS_PROBE(fail2);
	etp->et_pio_size = 0;
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_tx_qpio_disable(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;

	if (etp->et_pio_size != 0) {
		/* Unlink the piobuf from this TXQ */
		ef10_nic_pio_unlink(enp, etp->et_index);

		/* Free the sub-allocated PIO block */
		ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
		etp->et_pio_size = 0;
		etp->et_pio_write_offset = 0;
	}
}

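/*
 * Copy packet data into this queue's PIO block. The caller must supply a
 * length that is a multiple of 8 bytes, as writes to the write-combined
 * mapping are performed one 64-bit word at a time.
 */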
	__checkReturn	efx_rc_t
ef10_tx_qpio_write(
	__in			efx_txq_t *etp,
	__in_ecount(length)	uint8_t *buffer,
	__in			size_t length,
	__in			size_t offset)
{
	efx_nic_t *enp = etp->et_enp;
	efsys_bar_t *esbp = enp->en_esbp;
	uint32_t write_offset;
	uint32_t write_offset_limit;
	efx_qword_t *eqp;
	efx_rc_t rc;

	EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);

	if (etp->et_pio_size == 0) {
		rc = ENOENT;
		goto fail1;
	}
	if (offset + length > etp->et_pio_size) {
		rc = ENOSPC;
		goto fail2;
	}

	/*
	 * Writes to PIO buffers must be 64 bit aligned, and multiples of
	 * 64 bits.
	 */
	write_offset = etp->et_pio_write_offset + offset;
	write_offset_limit = write_offset + length;
	eqp = (efx_qword_t *)buffer;
	while (write_offset < write_offset_limit) {
		EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
		eqp++;
		write_offset += sizeof (efx_qword_t);
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

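/*
 * Post a PIO option descriptor referring to data already written into the
 * queue's PIO block with ef10_tx_qpio_write(). The descriptor carries the
 * packet length and the offset of the block within the PIO buffer.
 */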
	__checkReturn	efx_rc_t
ef10_tx_qpio_post(
	__in			efx_txq_t *etp,
	__in			size_t pkt_length,
	__in			unsigned int completed,
	__inout			unsigned int *addedp)
{
	efx_qword_t pio_desc;
	unsigned int id;
	size_t offset;
	unsigned int added = *addedp;
	efx_rc_t rc;

	if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	if (etp->et_pio_size == 0) {
		rc = ENOENT;
		goto fail2;
	}

	id = added++ & etp->et_mask;
	offset = id * sizeof (efx_qword_t);

	EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
		    unsigned int, id, uint32_t, etp->et_pio_offset,
		    size_t, pkt_length);

	EFX_POPULATE_QWORD_5(pio_desc,
			ESF_DZ_TX_DESC_IS_OPT, 1,
			ESF_DZ_TX_OPTION_TYPE, 1,
			ESF_DZ_TX_PIO_CONT, 0,
			ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
			ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);

	EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);

	EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);

	*addedp = added;
	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

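/*
 * Post a fragment list as ordinary DMA descriptors. Each efx_buffer_t
 * becomes one descriptor; the continuation bit is clear only on the
 * fragment marked end-of-packet (eb_eop).
 */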
	__checkReturn		efx_rc_t
ef10_tx_qpost(
	__in			efx_txq_t *etp,
	__in_ecount(ndescs)	efx_buffer_t *eb,
	__in			unsigned int ndescs,
	__in			unsigned int completed,
	__inout			unsigned int *addedp)
{
	unsigned int added = *addedp;
	unsigned int i;
	efx_rc_t rc;

	if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	for (i = 0; i < ndescs; i++) {
		efx_buffer_t *ebp = &eb[i];
		efsys_dma_addr_t addr = ebp->eb_addr;
		size_t size = ebp->eb_size;
		boolean_t eop = ebp->eb_eop;
		unsigned int id;
		size_t offset;
		efx_qword_t qword;

		/* No limitations on boundary crossing */
		EFSYS_ASSERT(size <=
		    etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);

		id = added++ & etp->et_mask;
		offset = id * sizeof (efx_qword_t);

		EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
		    unsigned int, id, efsys_dma_addr_t, addr,
		    size_t, size, boolean_t, eop);

		EFX_POPULATE_QWORD_5(qword,
		    ESF_DZ_TX_KER_TYPE, 0,
		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));

		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
	}

	EFX_TX_QSTAT_INCR(etp, TX_POST);

	*addedp = added;
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

/*
 * This improves performance by, when possible, pushing a TX descriptor at the
 * same time as the doorbell. The descriptor must also be added to the TXQ, so
 * that it can be used if the hardware decides not to use the pushed
 * descriptor.
 */
			void
ef10_tx_qpush(
	__in		efx_txq_t *etp,
	__in		unsigned int added,
	__in		unsigned int pushed)
{
	efx_nic_t *enp = etp->et_enp;
	unsigned int wptr;
	unsigned int id;
	size_t offset;
	efx_qword_t desc;
	efx_oword_t oword;

	wptr = added & etp->et_mask;
	id = pushed & etp->et_mask;
	offset = id * sizeof (efx_qword_t);

	EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);

	/*
	 * SF Bug 65776: TSO option descriptors cannot be pushed if pacer bypass
	 * is enabled on the event queue this transmit queue is attached to.
	 *
	 * To ensure the code is safe, it is easiest to simply test the type of
	 * the descriptor to push, and only push it if it is not a TSO option
	 * descriptor.
	 */
	if ((EFX_QWORD_FIELD(desc, ESF_DZ_TX_DESC_IS_OPT) != 1) ||
	    (EFX_QWORD_FIELD(desc, ESF_DZ_TX_OPTION_TYPE) !=
	    ESE_DZ_TX_OPTION_DESC_TSO)) {
		/* Push the descriptor and update the wptr. */
		EFX_POPULATE_OWORD_3(oword, ERF_DZ_TX_DESC_WPTR, wptr,
		    ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
		    ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));

		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
					    wptr, id);
		EFSYS_PIO_WRITE_BARRIER();
		EFX_BAR_VI_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
		    etp->et_index, &oword);
	} else {
		efx_dword_t dword;

		/*
		 * Only update the wptr. This is signalled to the hardware by
		 * only writing one DWORD of the doorbell register.
		 */
		EFX_POPULATE_OWORD_1(oword, ERF_DZ_TX_DESC_WPTR, wptr);
		dword = oword.eo_dword[2];

		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
					    wptr, id);
		EFSYS_PIO_WRITE_BARRIER();
		EFX_BAR_VI_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG,
		    etp->et_index, &dword, B_FALSE);
	}
}

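/*
 * Post pre-built descriptors (created by the ef10_tx_qdesc_*_create()
 * helpers below) onto the queue, subject to the usual free-space check.
 */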
	__checkReturn		efx_rc_t
ef10_tx_qdesc_post(
	__in			efx_txq_t *etp,
	__in_ecount(ndescs)	efx_desc_t *ed,
	__in			unsigned int ndescs,
	__in			unsigned int completed,
	__inout			unsigned int *addedp)
{
	unsigned int added = *addedp;
	unsigned int i;
	efx_rc_t rc;

	if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	for (i = 0; i < ndescs; i++) {
		efx_desc_t *edp = &ed[i];
		unsigned int id;
		size_t offset;

		id = added++ & etp->et_mask;
		offset = id * sizeof (efx_desc_t);

		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
	}

	EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
		    unsigned int, added, unsigned int, ndescs);

	EFX_TX_QSTAT_INCR(etp, TX_POST);

	*addedp = added;
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

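/* Build a standard DMA descriptor for a single buffer fragment. */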
	void
ef10_tx_qdesc_dma_create(
	__in	efx_txq_t *etp,
	__in	efsys_dma_addr_t addr,
	__in	size_t size,
	__in	boolean_t eop,
	__out	efx_desc_t *edp)
{
	_NOTE(ARGUNUSED(etp))

	/* No limitations on boundary crossing */
	EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);

	EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
		    efsys_dma_addr_t, addr,
		    size_t, size, boolean_t, eop);

	EFX_POPULATE_QWORD_5(edp->ed_eq,
		    ESF_DZ_TX_KER_TYPE, 0,
		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
}

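/*
 * Build a single TSO option descriptor carrying the IPv4 ID, TCP sequence
 * number and TCP flags (see ef10_tx_qdesc_tso2_create() for the
 * two-descriptor FATSOv2 form).
 */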
	void
ef10_tx_qdesc_tso_create(
	__in	efx_txq_t *etp,
	__in	uint16_t ipv4_id,
	__in	uint32_t tcp_seq,
	__in	uint8_t  tcp_flags,
	__out	efx_desc_t *edp)
{
	_NOTE(ARGUNUSED(etp))

	EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
		    uint16_t, ipv4_id, uint32_t, tcp_seq,
		    uint8_t, tcp_flags);

	EFX_POPULATE_QWORD_5(edp->ed_eq,
			    ESF_DZ_TX_DESC_IS_OPT, 1,
			    ESF_DZ_TX_OPTION_TYPE,
			    ESE_DZ_TX_OPTION_DESC_TSO,
			    ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
			    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
			    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
}

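/*
 * Build the two option descriptors used by FATSOv2: descriptor A carries
 * the IPv4 ID and TCP sequence number, descriptor B carries the TCP MSS.
 * The caller must provide at least EFX_TX_FATSOV2_OPT_NDESCS descriptors.
 */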
	void
ef10_tx_qdesc_tso2_create(
	__in			efx_txq_t *etp,
	__in			uint16_t ipv4_id,
	__in			uint32_t tcp_seq,
	__in			uint16_t tcp_mss,
	__out_ecount(count)	efx_desc_t *edp,
	__in			int count)
{
	_NOTE(ARGUNUSED(etp, count))

	EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
		    uint16_t, ipv4_id, uint32_t, tcp_seq,
		    uint16_t, tcp_mss);

	EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);

	EFX_POPULATE_QWORD_5(edp[0].ed_eq,
			    ESF_DZ_TX_DESC_IS_OPT, 1,
			    ESF_DZ_TX_OPTION_TYPE,
			    ESE_DZ_TX_OPTION_DESC_TSO,
			    ESF_DZ_TX_TSO_OPTION_TYPE,
			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
			    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
			    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
	EFX_POPULATE_QWORD_4(edp[1].ed_eq,
			    ESF_DZ_TX_DESC_IS_OPT, 1,
			    ESF_DZ_TX_OPTION_TYPE,
			    ESE_DZ_TX_OPTION_DESC_TSO,
			    ESF_DZ_TX_TSO_OPTION_TYPE,
			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
			    ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
}

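/*
 * Build a VLAN option descriptor. A non-zero TCI requests insertion of that
 * VLAN tag; a zero TCI requests no VLAN operation.
 */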
	void
ef10_tx_qdesc_vlantci_create(
	__in	efx_txq_t *etp,
	__in	uint16_t  tci,
	__out	efx_desc_t *edp)
{
	_NOTE(ARGUNUSED(etp))

	EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
		    uint16_t, tci);

	EFX_POPULATE_QWORD_4(edp->ed_eq,
			    ESF_DZ_TX_DESC_IS_OPT, 1,
			    ESF_DZ_TX_OPTION_TYPE,
			    ESE_DZ_TX_OPTION_DESC_VLAN,
			    ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
			    ESF_DZ_TX_VLAN_TAG1, tci);
}

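/*
 * Build a checksum option descriptor selecting which outer and inner
 * IP/TCP/UDP checksum offloads apply to subsequent packets on the queue.
 * Also written by ef10_tx_qcreate() as the queue's first descriptor to
 * clear the TX push collector.
 */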
	void
ef10_tx_qdesc_checksum_create(
	__in	efx_txq_t *etp,
	__in	uint16_t flags,
	__out	efx_desc_t *edp)
{
	_NOTE(ARGUNUSED(etp));

	EFSYS_PROBE2(tx_desc_checksum_create, unsigned int, etp->et_index,
		    uint32_t, flags);

	EFX_POPULATE_QWORD_6(edp->ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
	    ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
	    ESF_DZ_TX_OPTION_IP_CSUM,
	    (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0,
	    ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM,
	    (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
	    ESF_DZ_TX_OPTION_INNER_IP_CSUM,
	    (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0);
}

	__checkReturn	efx_rc_t
ef10_tx_qpace(
	__in		efx_txq_t *etp,
	__in		unsigned int ns)
{
	efx_rc_t rc;

	/* FIXME */
	_NOTE(ARGUNUSED(etp, ns))
	_NOTE(CONSTANTCONDITION)
	if (B_FALSE) {
		rc = ENOTSUP;
		goto fail1;
	}
	/* FIXME */

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_tx_qflush(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_rc_t rc;

	if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
		goto fail1;

	return (0);

fail1:
	/*
	 * EALREADY is not an error, but indicates that the MC has rebooted and
	 * that the TXQ has already been destroyed. Callers need to know that
	 * the TXQ flush has completed to avoid waiting until timeout for a
	 * flush done event that will not be delivered.
	 */
	if (rc != EALREADY)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_tx_qenable(
	__in		efx_txq_t *etp)
{
	/* FIXME */
	_NOTE(ARGUNUSED(etp))
	/* FIXME */
}

#if EFSYS_OPT_QSTATS
			void
ef10_tx_qstats_update(
	__in				efx_txq_t *etp,
	__inout_ecount(TX_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < TX_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, etp->et_stat[id]);
		etp->et_stat[id] = 0;
	}
}

#endif /* EFSYS_OPT_QSTATS */

#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */