xref: /freebsd/sys/dev/sfxge/common/ef10_tx.c (revision cab6a39d7b343596a5823e65c0f7b426551ec22d)
1 /*-
2  * Copyright (c) 2012-2016 Solarflare Communications Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright notice,
11  *    this list of conditions and the following disclaimer in the documentation
12  *    and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
18  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * The views and conclusions contained in the software and documentation are
27  * those of the authors and should not be interpreted as representing official
28  * policies, either expressed or implied, of the FreeBSD Project.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "efx.h"
35 #include "efx_impl.h"
36 
37 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
38 
39 #if EFSYS_OPT_QSTATS
40 #define	EFX_TX_QSTAT_INCR(_etp, _stat)					\
41 	do {								\
42 		(_etp)->et_stat[_stat]++;				\
43 	_NOTE(CONSTANTCONDITION)					\
44 	} while (B_FALSE)
45 #else
46 #define	EFX_TX_QSTAT_INCR(_etp, _stat)
47 #endif
48 
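/*
 * Send an MC_CMD_INIT_TXQ request to the MC to create a hardware TX queue.
 * The queue is identified to the firmware by 'instance' and 'label', is bound
 * to the event queue 'target_evq', and uses the descriptor ring memory in
 * 'esmp'. Checksum offload and FATSOv2 behaviour are selected via 'flags'.
 */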
49 static	__checkReturn	efx_rc_t
50 efx_mcdi_init_txq(
51 	__in		efx_nic_t *enp,
52 	__in		uint32_t ndescs,
53 	__in		uint32_t target_evq,
54 	__in		uint32_t label,
55 	__in		uint32_t instance,
56 	__in		uint16_t flags,
57 	__in		efsys_mem_t *esmp)
58 {
59 	efx_mcdi_req_t req;
60 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
61 		MC_CMD_INIT_TXQ_OUT_LEN);
62 	efx_qword_t *dma_addr;
63 	uint64_t addr;
64 	int npages;
65 	int i;
66 	efx_rc_t rc;
67 
68 	EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
69 	    EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));
70 
71 	if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_TXQ_SIZE(ndescs))) {
72 		rc = EINVAL;
73 		goto fail1;
74 	}
75 
76 	npages = EFX_TXQ_NBUFS(ndescs);
77 	if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) {
78 		rc = EINVAL;
79 		goto fail2;
80 	}
81 
82 	req.emr_cmd = MC_CMD_INIT_TXQ;
83 	req.emr_in_buf = payload;
84 	req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
85 	req.emr_out_buf = payload;
86 	req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;
87 
88 	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs);
89 	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
90 	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
91 	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);
92 
93 	MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS,
94 	    INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
95 	    INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
96 	    (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
97 	    INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
98 	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
99 	    INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN,
100 	    (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0,
101 	    INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN,
102 	    (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
103 	    INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
104 	    INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
105 	    INIT_TXQ_IN_CRC_MODE, 0,
106 	    INIT_TXQ_IN_FLAG_TIMESTAMP, 0);
107 
108 	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
109 	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
110 
111 	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
112 	addr = EFSYS_MEM_ADDR(esmp);
113 
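	/*
	 * Pass the DMA address of each EFX_BUF_SIZE page of the descriptor
	 * ring to the MC, splitting each 64-bit address into its low and
	 * high 32-bit dwords.
	 */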
114 	for (i = 0; i < npages; i++) {
115 		EFX_POPULATE_QWORD_2(*dma_addr,
116 		    EFX_DWORD_1, (uint32_t)(addr >> 32),
117 		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
118 
119 		dma_addr++;
120 		addr += EFX_BUF_SIZE;
121 	}
122 
123 	efx_mcdi_execute(enp, &req);
124 
125 	if (req.emr_rc != 0) {
126 		rc = req.emr_rc;
127 		goto fail3;
128 	}
129 
130 	return (0);
131 
132 fail3:
133 	EFSYS_PROBE(fail3);
134 fail2:
135 	EFSYS_PROBE(fail2);
136 fail1:
137 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
138 
139 	return (rc);
140 }
141 
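/*
 * Send an MC_CMD_FINI_TXQ request to tear down the hardware TX queue. The
 * quiet MCDI variant is used because an EALREADY response (the MC has
 * rebooted and the queue is already gone) is expected and is filtered out
 * below.
 */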
142 static	__checkReturn	efx_rc_t
143 efx_mcdi_fini_txq(
144 	__in		efx_nic_t *enp,
145 	__in		uint32_t instance)
146 {
147 	efx_mcdi_req_t req;
148 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_TXQ_IN_LEN,
149 		MC_CMD_FINI_TXQ_OUT_LEN);
150 	efx_rc_t rc;
151 
152 	req.emr_cmd = MC_CMD_FINI_TXQ;
153 	req.emr_in_buf = payload;
154 	req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
155 	req.emr_out_buf = payload;
156 	req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;
157 
158 	MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);
159 
160 	efx_mcdi_execute_quiet(enp, &req);
161 
162 	if (req.emr_rc != 0) {
163 		rc = req.emr_rc;
164 		goto fail1;
165 	}
166 
167 	return (0);
168 
169 fail1:
170 	/*
171 	 * EALREADY is not an error, but indicates that the MC has rebooted and
172 	 * that the TXQ has already been destroyed.
173 	 */
174 	if (rc != EALREADY)
175 		EFSYS_PROBE1(fail1, efx_rc_t, rc);
176 
177 	return (rc);
178 }
179 
180 	__checkReturn	efx_rc_t
181 ef10_tx_init(
182 	__in		efx_nic_t *enp)
183 {
184 	_NOTE(ARGUNUSED(enp))
185 	return (0);
186 }
187 
188 			void
189 ef10_tx_fini(
190 	__in		efx_nic_t *enp)
191 {
192 	_NOTE(ARGUNUSED(enp))
193 }
194 
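/*
 * Create an EF10 TX queue: check that inner (tunnel) checksum offload is only
 * requested when the NIC reports tunnel encapsulation support, initialise the
 * hardware queue via MCDI, then write and push a single checksum option
 * descriptor (reported to the caller through *addedp) to clear the TX push
 * collector.
 */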
195 	__checkReturn	efx_rc_t
196 ef10_tx_qcreate(
197 	__in		efx_nic_t *enp,
198 	__in		unsigned int index,
199 	__in		unsigned int label,
200 	__in		efsys_mem_t *esmp,
201 	__in		size_t ndescs,
202 	__in		uint32_t id,
203 	__in		uint16_t flags,
204 	__in		efx_evq_t *eep,
205 	__in		efx_txq_t *etp,
206 	__out		unsigned int *addedp)
207 {
208 	efx_nic_cfg_t *encp = &enp->en_nic_cfg;
209 	uint16_t inner_csum;
210 	efx_desc_t desc;
211 	efx_rc_t rc;
212 
213 	_NOTE(ARGUNUSED(id))
214 
215 	inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP;
216 	if (((flags & inner_csum) != 0) &&
217 	    (encp->enc_tunnel_encapsulations_supported == 0)) {
218 		rc = EINVAL;
219 		goto fail1;
220 	}
221 
222 	if ((rc = efx_mcdi_init_txq(enp, ndescs, eep->ee_index, label, index,
223 	    flags, esmp)) != 0)
224 		goto fail2;
225 
226 	/*
227 	 * A previous user of this TX queue may have written a descriptor to the
228 	 * TX push collector, but not pushed the doorbell (e.g. after a crash).
229 	 * The next doorbell write would then push the stale descriptor.
230 	 *
231 	 * Ensure the (per network port) TX push collector is cleared by writing
232 	 * a no-op TX option descriptor. See bug29981 for details.
233 	 */
234 	*addedp = 1;
235 	ef10_tx_qdesc_checksum_create(etp, flags, &desc);
236 
237 	EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc.ed_eq);
238 	ef10_tx_qpush(etp, *addedp, 0);
239 
240 	return (0);
241 
242 fail2:
243 	EFSYS_PROBE(fail2);
244 fail1:
245 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
246 
247 	return (rc);
248 }
249 
250 		void
251 ef10_tx_qdestroy(
252 	__in	efx_txq_t *etp)
253 {
254 	/* FIXME */
255 	_NOTE(ARGUNUSED(etp))
256 	/* FIXME */
257 }
258 
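/*
 * Enable PIO (programmed I/O) transmission for this TX queue by
 * sub-allocating a block from one of the adapter's PIO buffers and linking
 * that buffer to the queue.
 *
 * A PIO send, as suggested by the helpers below, would use
 * ef10_tx_qpio_write() to copy packet data into the block,
 * ef10_tx_qpio_post() to queue a PIO option descriptor, and then
 * ef10_tx_qpush() to ring the doorbell.
 */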
259 	__checkReturn	efx_rc_t
260 ef10_tx_qpio_enable(
261 	__in		efx_txq_t *etp)
262 {
263 	efx_nic_t *enp = etp->et_enp;
264 	efx_piobuf_handle_t handle;
265 	efx_rc_t rc;
266 
267 	if (etp->et_pio_size != 0) {
268 		rc = EALREADY;
269 		goto fail1;
270 	}
271 
272 	/* Sub-allocate a PIO block from a piobuf */
273 	if ((rc = ef10_nic_pio_alloc(enp,
274 		    &etp->et_pio_bufnum,
275 		    &handle,
276 		    &etp->et_pio_blknum,
277 		    &etp->et_pio_offset,
278 		    &etp->et_pio_size)) != 0) {
279 		goto fail2;
280 	}
281 	EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);
282 
283 	/* Link the piobuf to this TXQ */
284 	if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
285 		goto fail3;
286 	}
287 
288 	/*
289 	 * et_pio_offset is the offset of the sub-allocated block within the
290 	 * hardware PIO buffer. It is used as the buffer address in the PIO
291 	 * option descriptor.
292 	 *
293 	 * et_pio_write_offset is the offset of the sub-allocated block from the
294 	 * start of the write-combined memory mapping, and is used for writing
295 	 * data into the PIO buffer.
296 	 */
297 	etp->et_pio_write_offset =
298 	    (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
299 	    ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;
300 
301 	return (0);
302 
303 fail3:
304 	EFSYS_PROBE(fail3);
305 	(void) ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
306 fail2:
307 	EFSYS_PROBE(fail2);
308 	etp->et_pio_size = 0;
309 fail1:
310 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
311 
312 	return (rc);
313 }
314 
315 			void
316 ef10_tx_qpio_disable(
317 	__in		efx_txq_t *etp)
318 {
319 	efx_nic_t *enp = etp->et_enp;
320 
321 	if (etp->et_pio_size != 0) {
322 		/* Unlink the piobuf from this TXQ */
323 		if (ef10_nic_pio_unlink(enp, etp->et_index) != 0)
324 			return;
325 
326 		/* Free the sub-allocated PIO block */
327 		(void) ef10_nic_pio_free(enp, etp->et_pio_bufnum,
328 		    etp->et_pio_blknum);
329 		etp->et_pio_size = 0;
330 		etp->et_pio_write_offset = 0;
331 	}
332 }
333 
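/*
 * Copy packet data into this queue's PIO block using 64-bit writes to the
 * write-combined BAR mapping. 'offset' and 'length' are checked against the
 * size of the sub-allocated block, and writes must be made in whole 64-bit
 * units (see the assertion below).
 */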
334 	__checkReturn	efx_rc_t
335 ef10_tx_qpio_write(
336 	__in			efx_txq_t *etp,
337 	__in_ecount(length)	uint8_t *buffer,
338 	__in			size_t length,
339 	__in			size_t offset)
340 {
341 	efx_nic_t *enp = etp->et_enp;
342 	efsys_bar_t *esbp = enp->en_esbp;
343 	uint32_t write_offset;
344 	uint32_t write_offset_limit;
345 	efx_qword_t *eqp;
346 	efx_rc_t rc;
347 
348 	EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);
349 
350 	if (etp->et_pio_size == 0) {
351 		rc = ENOENT;
352 		goto fail1;
353 	}
	if (offset + length > etp->et_pio_size) {
355 		rc = ENOSPC;
356 		goto fail2;
357 	}
358 
359 	/*
360 	 * Writes to PIO buffers must be 64 bit aligned, and multiples of
361 	 * 64 bits.
362 	 */
363 	write_offset = etp->et_pio_write_offset + offset;
364 	write_offset_limit = write_offset + length;
365 	eqp = (efx_qword_t *)buffer;
366 	while (write_offset < write_offset_limit) {
367 		EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
368 		eqp++;
369 		write_offset += sizeof (efx_qword_t);
370 	}
371 
372 	return (0);
373 
374 fail2:
375 	EFSYS_PROBE(fail2);
376 fail1:
377 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
378 
379 	return (rc);
380 }
381 
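/*
 * Queue a PIO option descriptor that tells the hardware to transmit
 * 'pkt_length' bytes from this queue's PIO block (previously filled by
 * ef10_tx_qpio_write()). Only the descriptor ring is written here; ringing
 * the doorbell is left to the caller, typically via ef10_tx_qpush().
 */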
382 	__checkReturn	efx_rc_t
383 ef10_tx_qpio_post(
384 	__in			efx_txq_t *etp,
385 	__in			size_t pkt_length,
386 	__in			unsigned int completed,
387 	__inout			unsigned int *addedp)
388 {
389 	efx_qword_t pio_desc;
390 	unsigned int id;
391 	size_t offset;
392 	unsigned int added = *addedp;
393 	efx_rc_t rc;
394 
395 	if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
396 		rc = ENOSPC;
397 		goto fail1;
398 	}
399 
400 	if (etp->et_pio_size == 0) {
401 		rc = ENOENT;
402 		goto fail2;
403 	}
404 
405 	id = added++ & etp->et_mask;
406 	offset = id * sizeof (efx_qword_t);
407 
408 	EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
409 		    unsigned int, id, uint32_t, etp->et_pio_offset,
410 		    size_t, pkt_length);
411 
412 	EFX_POPULATE_QWORD_5(pio_desc,
413 			ESF_DZ_TX_DESC_IS_OPT, 1,
414 			ESF_DZ_TX_OPTION_TYPE, 1,
415 			ESF_DZ_TX_PIO_CONT, 0,
416 			ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
417 			ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);
418 
419 	EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);
420 
421 	EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);
422 
423 	*addedp = added;
424 	return (0);
425 
426 fail2:
427 	EFSYS_PROBE(fail2);
428 fail1:
429 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
430 
431 	return (rc);
432 }
433 
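/*
 * Write DMA descriptors for an array of buffer fragments to the descriptor
 * ring, failing with ENOSPC rather than overfilling the ring. The doorbell is
 * not written here; the caller is expected to follow up with ef10_tx_qpush().
 */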
434 	__checkReturn		efx_rc_t
435 ef10_tx_qpost(
436 	__in			efx_txq_t *etp,
437 	__in_ecount(ndescs)	efx_buffer_t *eb,
438 	__in			unsigned int ndescs,
439 	__in			unsigned int completed,
440 	__inout			unsigned int *addedp)
441 {
442 	unsigned int added = *addedp;
443 	unsigned int i;
444 	efx_rc_t rc;
445 
446 	if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
447 		rc = ENOSPC;
448 		goto fail1;
449 	}
450 
451 	for (i = 0; i < ndescs; i++) {
452 		efx_buffer_t *ebp = &eb[i];
453 		efsys_dma_addr_t addr = ebp->eb_addr;
454 		size_t size = ebp->eb_size;
455 		boolean_t eop = ebp->eb_eop;
456 		unsigned int id;
457 		size_t offset;
458 		efx_qword_t qword;
459 
460 		/* No limitations on boundary crossing */
461 		EFSYS_ASSERT(size <=
462 		    etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
463 
464 		id = added++ & etp->et_mask;
465 		offset = id * sizeof (efx_qword_t);
466 
467 		EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
468 		    unsigned int, id, efsys_dma_addr_t, addr,
469 		    size_t, size, boolean_t, eop);
470 
471 		EFX_POPULATE_QWORD_5(qword,
472 		    ESF_DZ_TX_KER_TYPE, 0,
473 		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
474 		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
475 		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
476 		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
477 
478 		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
479 	}
480 
481 	EFX_TX_QSTAT_INCR(etp, TX_POST);
482 
483 	*addedp = added;
484 	return (0);
485 
486 fail1:
487 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
488 
489 	return (rc);
490 }
491 
492 /*
493  * This improves performance by, when possible, pushing a TX descriptor at the
494  * same time as the doorbell. The descriptor must be added to the TXQ, so that
 * it can be used if the hardware decides not to use the pushed descriptor.
496  */
497 			void
498 ef10_tx_qpush(
499 	__in		efx_txq_t *etp,
500 	__in		unsigned int added,
501 	__in		unsigned int pushed)
502 {
503 	efx_nic_t *enp = etp->et_enp;
504 	unsigned int wptr;
505 	unsigned int id;
506 	size_t offset;
507 	efx_qword_t desc;
508 	efx_oword_t oword;
509 
510 	wptr = added & etp->et_mask;
511 	id = pushed & etp->et_mask;
512 	offset = id * sizeof (efx_qword_t);
513 
514 	EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);
515 
516 	/*
517 	 * SF Bug 65776: TSO option descriptors cannot be pushed if pacer bypass
518 	 * is enabled on the event queue this transmit queue is attached to.
519 	 *
520 	 * To ensure the code is safe, it is easiest to simply test the type of
	 * the descriptor to push, and only push it if it is not a TSO option
522 	 * descriptor.
523 	 */
524 	if ((EFX_QWORD_FIELD(desc, ESF_DZ_TX_DESC_IS_OPT) != 1) ||
525 	    (EFX_QWORD_FIELD(desc, ESF_DZ_TX_OPTION_TYPE) !=
526 	    ESE_DZ_TX_OPTION_DESC_TSO)) {
527 		/* Push the descriptor and update the wptr. */
528 		EFX_POPULATE_OWORD_3(oword, ERF_DZ_TX_DESC_WPTR, wptr,
529 		    ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
530 		    ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
531 
532 		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
533 		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
534 					    wptr, id);
535 		EFSYS_PIO_WRITE_BARRIER();
536 		EFX_BAR_VI_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
537 		    etp->et_index, &oword);
538 	} else {
539 		efx_dword_t dword;
540 
541 		/*
542 		 * Only update the wptr. This is signalled to the hardware by
543 		 * only writing one DWORD of the doorbell register.
544 		 */
545 		EFX_POPULATE_OWORD_1(oword, ERF_DZ_TX_DESC_WPTR, wptr);
546 		dword = oword.eo_dword[2];
547 
548 		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
549 		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
550 					    wptr, id);
551 		EFSYS_PIO_WRITE_BARRIER();
552 		EFX_BAR_VI_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG,
553 		    etp->et_index, &dword, B_FALSE);
554 	}
555 }
556 
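/*
 * Write pre-built descriptors (constructed with the ef10_tx_qdesc_*_create()
 * helpers below) to the descriptor ring, subject to the same ring-space check
 * as ef10_tx_qpost().
 */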
557 	__checkReturn		efx_rc_t
558 ef10_tx_qdesc_post(
559 	__in			efx_txq_t *etp,
560 	__in_ecount(ndescs)	efx_desc_t *ed,
561 	__in			unsigned int ndescs,
562 	__in			unsigned int completed,
563 	__inout			unsigned int *addedp)
564 {
565 	unsigned int added = *addedp;
566 	unsigned int i;
567 
568 	if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1))
569 		return (ENOSPC);
570 
571 	for (i = 0; i < ndescs; i++) {
572 		efx_desc_t *edp = &ed[i];
573 		unsigned int id;
574 		size_t offset;
575 
576 		id = added++ & etp->et_mask;
577 		offset = id * sizeof (efx_desc_t);
578 
579 		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
580 	}
581 
582 	EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
583 		    unsigned int, added, unsigned int, ndescs);
584 
585 	EFX_TX_QSTAT_INCR(etp, TX_POST);
586 
587 	*addedp = added;
588 	return (0);
589 }
590 
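/*
 * Build a single DMA descriptor in *edp describing 'size' bytes at 'addr'.
 * The descriptor is only constructed; it is not written to the ring here.
 */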
591 	void
592 ef10_tx_qdesc_dma_create(
593 	__in	efx_txq_t *etp,
594 	__in	efsys_dma_addr_t addr,
595 	__in	size_t size,
596 	__in	boolean_t eop,
597 	__out	efx_desc_t *edp)
598 {
599 	_NOTE(ARGUNUSED(etp))
600 
601 	/* No limitations on boundary crossing */
602 	EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
603 
604 	EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
605 		    efsys_dma_addr_t, addr,
606 		    size_t, size, boolean_t, eop);
607 
608 	EFX_POPULATE_QWORD_5(edp->ed_eq,
609 		    ESF_DZ_TX_KER_TYPE, 0,
610 		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
611 		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
612 		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
613 		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
614 }
615 
616 	void
617 ef10_tx_qdesc_tso_create(
618 	__in	efx_txq_t *etp,
619 	__in	uint16_t ipv4_id,
620 	__in	uint32_t tcp_seq,
621 	__in	uint8_t  tcp_flags,
622 	__out	efx_desc_t *edp)
623 {
624 	_NOTE(ARGUNUSED(etp))
625 
626 	EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
627 		    uint16_t, ipv4_id, uint32_t, tcp_seq,
628 		    uint8_t, tcp_flags);
629 
630 	EFX_POPULATE_QWORD_5(edp->ed_eq,
631 			    ESF_DZ_TX_DESC_IS_OPT, 1,
632 			    ESF_DZ_TX_OPTION_TYPE,
633 			    ESE_DZ_TX_OPTION_DESC_TSO,
634 			    ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
635 			    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
636 			    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
637 }
638 
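/*
 * Build the pair of FATSOv2 option descriptors: FATSO2A carries the IP ID and
 * TCP sequence number, FATSO2B carries the TCP MSS and the outer IP ID used
 * for encapsulated traffic. The caller must provide room for at least
 * EFX_TX_FATSOV2_OPT_NDESCS descriptors.
 */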
639 	void
640 ef10_tx_qdesc_tso2_create(
641 	__in			efx_txq_t *etp,
642 	__in			uint16_t ipv4_id,
643 	__in			uint16_t outer_ipv4_id,
644 	__in			uint32_t tcp_seq,
645 	__in			uint16_t tcp_mss,
646 	__out_ecount(count)	efx_desc_t *edp,
647 	__in			int count)
648 {
649 	_NOTE(ARGUNUSED(etp, count))
650 
651 	EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
652 		    uint16_t, ipv4_id, uint32_t, tcp_seq,
653 		    uint16_t, tcp_mss);
654 
655 	EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);
656 
657 	EFX_POPULATE_QWORD_5(edp[0].ed_eq,
658 			    ESF_DZ_TX_DESC_IS_OPT, 1,
659 			    ESF_DZ_TX_OPTION_TYPE,
660 			    ESE_DZ_TX_OPTION_DESC_TSO,
661 			    ESF_DZ_TX_TSO_OPTION_TYPE,
662 			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
663 			    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
664 			    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
665 	EFX_POPULATE_QWORD_5(edp[1].ed_eq,
666 			    ESF_DZ_TX_DESC_IS_OPT, 1,
667 			    ESF_DZ_TX_OPTION_TYPE,
668 			    ESE_DZ_TX_OPTION_DESC_TSO,
669 			    ESF_DZ_TX_TSO_OPTION_TYPE,
670 			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
671 			    ESF_DZ_TX_TSO_TCP_MSS, tcp_mss,
672 			    ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id);
673 }
674 
675 	void
676 ef10_tx_qdesc_vlantci_create(
677 	__in	efx_txq_t *etp,
678 	__in	uint16_t  tci,
679 	__out	efx_desc_t *edp)
680 {
681 	_NOTE(ARGUNUSED(etp))
682 
683 	EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
684 		    uint16_t, tci);
685 
686 	EFX_POPULATE_QWORD_4(edp->ed_eq,
687 			    ESF_DZ_TX_DESC_IS_OPT, 1,
688 			    ESF_DZ_TX_OPTION_TYPE,
689 			    ESE_DZ_TX_OPTION_DESC_VLAN,
690 			    ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
691 			    ESF_DZ_TX_VLAN_TAG1, tci);
692 }
693 
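/*
 * Build a checksum option descriptor selecting outer and inner IP and TCP/UDP
 * checksum offload according to 'flags'. ef10_tx_qcreate() uses this with the
 * queue's creation flags to produce a harmless option descriptor that clears
 * the TX push collector.
 */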
694 	void
695 ef10_tx_qdesc_checksum_create(
696 	__in	efx_txq_t *etp,
697 	__in	uint16_t flags,
698 	__out	efx_desc_t *edp)
699 {
700 	_NOTE(ARGUNUSED(etp));
701 
702 	EFSYS_PROBE2(tx_desc_checksum_create, unsigned int, etp->et_index,
703 		    uint32_t, flags);
704 
705 	EFX_POPULATE_QWORD_6(edp->ed_eq,
706 	    ESF_DZ_TX_DESC_IS_OPT, 1,
707 	    ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
708 	    ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
709 	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
710 	    ESF_DZ_TX_OPTION_IP_CSUM,
711 	    (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0,
712 	    ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM,
713 	    (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
714 	    ESF_DZ_TX_OPTION_INNER_IP_CSUM,
715 	    (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0);
716 }
717 
718 	__checkReturn	efx_rc_t
719 ef10_tx_qpace(
720 	__in		efx_txq_t *etp,
721 	__in		unsigned int ns)
722 {
723 	efx_rc_t rc;
724 
725 	/* FIXME */
726 	_NOTE(ARGUNUSED(etp, ns))
727 	_NOTE(CONSTANTCONDITION)
728 	if (B_FALSE) {
729 		rc = ENOTSUP;
730 		goto fail1;
731 	}
732 	/* FIXME */
733 
734 	return (0);
735 
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);
745 
746 	return (rc);
747 }
748 
749 	__checkReturn	efx_rc_t
750 ef10_tx_qflush(
751 	__in		efx_txq_t *etp)
752 {
753 	efx_nic_t *enp = etp->et_enp;
754 	efx_rc_t rc;
755 
756 	if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
757 		goto fail1;
758 
759 	return (0);
760 
fail1:
	/*
	 * EALREADY is not an error, but indicates that the MC has rebooted and
	 * that the TXQ has already been destroyed. Callers need to know that
	 * the TXQ flush has completed to avoid waiting until timeout for a
	 * flush done event that will not be delivered.
	 */
	if (rc != EALREADY)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);
763 
764 	return (rc);
765 }
766 
767 			void
768 ef10_tx_qenable(
769 	__in		efx_txq_t *etp)
770 {
771 	/* FIXME */
772 	_NOTE(ARGUNUSED(etp))
773 	/* FIXME */
774 }
775 
776 #if EFSYS_OPT_QSTATS
777 			void
778 ef10_tx_qstats_update(
779 	__in				efx_txq_t *etp,
780 	__inout_ecount(TX_NQSTATS)	efsys_stat_t *stat)
781 {
782 	unsigned int id;
783 
784 	for (id = 0; id < TX_NQSTATS; id++) {
785 		efsys_stat_t *essp = &stat[id];
786 
787 		EFSYS_STAT_INCR(essp, etp->et_stat[id]);
788 		etp->et_stat[id] = 0;
789 	}
790 }
791 
792 #endif /* EFSYS_OPT_QSTATS */
793 
794 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
795