/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2018 aQuantia Corporation. All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   (1) Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 *   (2) Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 *   (3) The name of the author may not be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bitstring.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <machine/param.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/iflib.h>
#include <netinet/in.h>

#include "aq_common.h"

#include "aq_ring.h"
#include "aq_dbg.h"
#include "aq_device.h"
#include "aq_hw.h"
#include "aq_hw_llh.h"

/* iflib txrx interface prototypes */
static int aq_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void aq_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int aq_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);
static void aq_ring_rx_refill(void *arg, if_rxd_update_t iru);
static void aq_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx);
static int aq_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget);
static int aq_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

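/*
 * Dispatch table handed to iflib.  A minimal sketch of how it is typically
 * wired up from the driver's attach code; the "aq_sctx_init" name below is
 * illustrative, not taken from this file:
 *
 *	static struct if_shared_ctx aq_sctx_init = {
 *		.isc_magic = IFLIB_MAGIC,
 *		...
 *		.isc_txrx = &aq_txrx,
 *	};
 */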
struct if_txrx aq_txrx = {
	.ift_txd_encap = aq_isc_txd_encap,
	.ift_txd_flush = aq_isc_txd_flush,
	.ift_txd_credits_update = aq_isc_txd_credits_update,
	.ift_rxd_available = aq_isc_rxd_available,
	.ift_rxd_pkt_get = aq_isc_rxd_pkt_get,
	.ift_rxd_refill = aq_ring_rx_refill,
	.ift_rxd_flush = aq_isc_rxd_flush,
	.ift_legacy_intr = NULL
};

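/*
 * Advance a ring index by one, wrapping to 0 past the last valid slot.
 * Note that lim is the last valid index (ring size - 1), not the size.
 */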
static inline uint32_t
aq_next(uint32_t i, uint32_t lim)
{
	return (i == lim) ? 0 : i + 1;
}

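/*
 * Program one Rx descriptor ring into the hardware: base address, ring
 * length (in units of 8 descriptors), receive buffer size (in 1KB units)
 * and MSI-X vector mapping.  The ring is left disabled until
 * aq_ring_rx_start() is called.
 */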
int
aq_ring_rx_init(struct aq_hw *hw, struct aq_ring *ring)
{
	int err;
	uint32_t dma_desc_addr_lsw = (uint32_t)ring->rx_descs_phys & 0xffffffff;
	uint32_t dma_desc_addr_msw = (uint32_t)(ring->rx_descs_phys >> 32);

	AQ_DBG_ENTERA("[%d]", ring->index);

	rdm_rx_desc_en_set(hw, false, ring->index);

	rdm_rx_desc_head_splitting_set(hw, 0U, ring->index);

	reg_rx_dma_desc_base_addresslswset(hw, dma_desc_addr_lsw, ring->index);

	reg_rx_dma_desc_base_addressmswset(hw, dma_desc_addr_msw, ring->index);

	rdm_rx_desc_len_set(hw, ring->rx_size / 8U, ring->index);

	device_printf(ring->dev->dev,
	    "ring %d: PAGE_SIZE=%d MCLBYTES=%d rx_max_frame_size=%d\n",
	    ring->index, PAGE_SIZE, MCLBYTES, ring->rx_max_frame_size);
	rdm_rx_desc_data_buff_size_set(hw, ring->rx_max_frame_size / 1024U,
	    ring->index);

	rdm_rx_desc_head_buff_size_set(hw, 0U, ring->index);
	rdm_rx_desc_head_splitting_set(hw, 0U, ring->index);
	rpo_rx_desc_vlan_stripping_set(hw, 0U, ring->index);

	/* Mapping interrupt vector */
	itr_irq_map_rx_set(hw, ring->msix, ring->index);
	itr_irq_map_en_rx_set(hw, true, ring->index);

	rdm_cpu_id_set(hw, 0, ring->index);
	rdm_rx_desc_dca_en_set(hw, 0U, ring->index);
	rdm_rx_head_dca_en_set(hw, 0U, ring->index);
	rdm_rx_pld_dca_en_set(hw, 0U, ring->index);

	err = aq_hw_err_from_flags(hw);
	AQ_DBG_EXIT(err);
	return (err);
}

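/*
 * Program one Tx descriptor ring into the hardware: base address, ring
 * length, write-back threshold and MSI-X vector mapping.  The ring is
 * left disabled until aq_ring_tx_start() is called.
 */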
int
aq_ring_tx_init(struct aq_hw *hw, struct aq_ring *ring)
{
	int err;
	uint32_t dma_desc_addr_lsw = (uint32_t)ring->tx_descs_phys & 0xffffffff;
	uint32_t dma_desc_addr_msw = (uint32_t)(ring->tx_descs_phys >> 32);

	AQ_DBG_ENTERA("[%d]", ring->index);

	tdm_tx_desc_en_set(hw, 0U, ring->index);

	reg_tx_dma_desc_base_addresslswset(hw, dma_desc_addr_lsw, ring->index);

	reg_tx_dma_desc_base_addressmswset(hw, dma_desc_addr_msw, ring->index);

	tdm_tx_desc_len_set(hw, ring->tx_size / 8U, ring->index);

	aq_ring_tx_tail_update(hw, ring, 0U);

	/* Set Tx threshold */
	tdm_tx_desc_wr_wb_threshold_set(hw, 0U, ring->index);

	/* Mapping interrupt vector */
	itr_irq_map_tx_set(hw, ring->msix, ring->index);
	itr_irq_map_en_tx_set(hw, true, ring->index);

	tdm_cpu_id_set(hw, 0, ring->index);
	tdm_tx_desc_dca_en_set(hw, 0U, ring->index);

	err = aq_hw_err_from_flags(hw);
	AQ_DBG_EXIT(err);
	return (err);
}

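/*
 * Publish a new Tx tail (producer) pointer; the hardware fetches and
 * transmits descriptors up to, but not including, this index.
 */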
int
aq_ring_tx_tail_update(struct aq_hw *hw, struct aq_ring *ring, uint32_t tail)
{
	AQ_DBG_ENTERA("[%d]", ring->index);
	reg_tx_dma_desc_tail_ptr_set(hw, tail, ring->index);
	AQ_DBG_EXIT(0);
	return (0);
}

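/*
 * Ring enable/disable helpers.  Stopping an Rx ring additionally toggles
 * the descriptor cache init bit so that stale cached descriptors cannot
 * be written back after the ring has been disabled.
 */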
int
aq_ring_tx_start(struct aq_hw *hw, struct aq_ring *ring)
{
	int err;

	AQ_DBG_ENTERA("[%d]", ring->index);
	tdm_tx_desc_en_set(hw, 1U, ring->index);
	err = aq_hw_err_from_flags(hw);
	AQ_DBG_EXIT(err);
	return (err);
}

int
aq_ring_rx_start(struct aq_hw *hw, struct aq_ring *ring)
{
	int err;

	AQ_DBG_ENTERA("[%d]", ring->index);
	rdm_rx_desc_en_set(hw, 1U, ring->index);
	err = aq_hw_err_from_flags(hw);
	AQ_DBG_EXIT(err);
	return (err);
}

int
aq_ring_tx_stop(struct aq_hw *hw, struct aq_ring *ring)
{
	int err;

	AQ_DBG_ENTERA("[%d]", ring->index);
	tdm_tx_desc_en_set(hw, 0U, ring->index);
	err = aq_hw_err_from_flags(hw);
	AQ_DBG_EXIT(err);
	return (err);
}

int
aq_ring_rx_stop(struct aq_hw *hw, struct aq_ring *ring)
{
	int err;

	AQ_DBG_ENTERA("[%d]", ring->index);
	rdm_rx_desc_en_set(hw, 0U, ring->index);
	/* Invalidate Descriptor Cache to prevent writing to the cached
	 * descriptors and to the data pointer of those descriptors
	 */
	rdm_rx_dma_desc_cache_init_tgl(hw);
	err = aq_hw_err_from_flags(hw);
	AQ_DBG_EXIT(err);
	return (err);
}

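/*
 * iflib ift_rxd_refill callback: write the physical addresses of freshly
 * allocated receive buffers into the descriptor ring.  The hardware is
 * not notified here; the tail pointer is published by aq_isc_rxd_flush().
 */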
static void
aq_ring_rx_refill(void *arg, if_rxd_update_t iru)
{
	aq_dev_t *aq_dev = arg;
	aq_rx_desc_t *rx_desc;
	struct aq_ring *ring;
	qidx_t i, pidx;

	AQ_DBG_ENTERA("ring=%d iru_pidx=%d iru_count=%d iru->iru_buf_size=%d",
	    iru->iru_qsidx, iru->iru_pidx, iru->iru_count, iru->iru_buf_size);

	ring = aq_dev->rx_rings[iru->iru_qsidx];
	pidx = iru->iru_pidx;

	for (i = 0; i < iru->iru_count; i++) {
		rx_desc = (aq_rx_desc_t *)&ring->rx_descs[pidx];
		rx_desc->read.buf_addr = htole64(iru->iru_paddrs[i]);
		rx_desc->read.hdr_addr = 0;

		pidx = aq_next(pidx, ring->rx_size - 1);
	}

	AQ_DBG_EXIT(0);
}

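/*
 * iflib ift_rxd_flush callback: publish the new Rx producer index so the
 * hardware can start filling the buffers posted by aq_ring_rx_refill().
 */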
static void
aq_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring = aq_dev->rx_rings[rxqid];

	AQ_DBG_ENTERA("[%d] tail=%u", ring->index, pidx);
	reg_rx_dma_desc_tail_ptr_set(&aq_dev->hw, pidx, ring->index);
	AQ_DBG_EXIT(0);
}

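/*
 * iflib ift_rxd_available callback: count the complete packets that have
 * been written back (DD set) starting at idx, following next_desp chains
 * for RSC/LRO aggregates, until the budget is exhausted or an unfinished
 * descriptor is found.
 */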
static int
aq_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring = aq_dev->rx_rings[rxqid];
	aq_rx_desc_t *rx_desc = (aq_rx_desc_t *)ring->rx_descs;
	int cnt, i, iter;

	AQ_DBG_ENTERA("[%d] head=%u, budget %d", ring->index, idx, budget);

	for (iter = 0, cnt = 0, i = idx;
	    iter < ring->rx_size && cnt <= budget;) {
		trace_aq_rx_descr(ring->index, i,
		    (volatile uint64_t *)&rx_desc[i]);
		if (!rx_desc[i].wb.dd)
			break;

		if (rx_desc[i].wb.eop) {
			iter++;
			i = aq_next(i, ring->rx_size - 1);
			cnt++;
		} else if (rx_desc[i].wb.rsc_cnt) {
			/* LRO/Jumbo: wait for the whole packet to land in
			 * the ring */
			iter++;
			i = rx_desc[i].wb.next_desp;
		} else {
			iter++;
			i = aq_next(i, ring->rx_size - 1);
		}
	}

	AQ_DBG_EXIT(cnt);
	return (cnt);
}

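/*
 * Translate the hardware checksum status bits of a write-back descriptor
 * into iflib/mbuf checksum flags for the received packet.
 */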
static void
aq_rx_set_cso_flags(aq_rx_desc_t *rx_desc, if_rxd_info_t ri)
{
	if ((rx_desc->wb.pkt_type & 0x3) == 0) { // IPv4
		if (rx_desc->wb.rx_cntl & BIT(0)) { // IPv4 csum checked
			ri->iri_csum_flags |= CSUM_IP_CHECKED;
			if (!(rx_desc->wb.rx_stat & BIT(1)))
				ri->iri_csum_flags |= CSUM_IP_VALID;
		}
	}
	if (rx_desc->wb.rx_cntl & BIT(1)) { // TCP/UDP csum checked
		ri->iri_csum_flags |= CSUM_L4_CALC;
		if (!(rx_desc->wb.rx_stat & BIT(2)) && // no L4 csum error
		    (rx_desc->wb.rx_stat & BIT(3))) {  // L4 csum valid
			ri->iri_csum_flags |= CSUM_L4_VALID;
			ri->iri_csum_data = htons(0xffff);
		}
	}
}

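/*
 * Map the 4-bit hardware RSS type field to FreeBSD mbuf hash types.
 * Entries not listed default to 0, i.e. M_HASHTYPE_NONE.
 */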
static uint8_t bsd_rss_type[16] = {
	[AQ_RX_RSS_TYPE_IPV4] = M_HASHTYPE_RSS_IPV4,
	[AQ_RX_RSS_TYPE_IPV6] = M_HASHTYPE_RSS_IPV6,
	[AQ_RX_RSS_TYPE_IPV4_TCP] = M_HASHTYPE_RSS_TCP_IPV4,
	[AQ_RX_RSS_TYPE_IPV6_TCP] = M_HASHTYPE_RSS_TCP_IPV6,
	[AQ_RX_RSS_TYPE_IPV4_UDP] = M_HASHTYPE_RSS_UDP_IPV4,
	[AQ_RX_RSS_TYPE_IPV6_UDP] = M_HASHTYPE_RSS_UDP_IPV6,
};

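/*
 * iflib ift_rxd_pkt_get callback: walk the descriptors of one received
 * packet (until EOP), filling in the fragment list, length, VLAN tag,
 * checksum flags and RSS hash for iflib to assemble into an mbuf chain.
 */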
static int
aq_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring = aq_dev->rx_rings[ri->iri_qsidx];
	aq_rx_desc_t *rx_desc;
	if_t ifp;
	int cidx, rc = 0, i;
	size_t len, total_len;

	AQ_DBG_ENTERA("[%d] start=%d", ring->index, ri->iri_cidx);
	cidx = ri->iri_cidx;
	ifp = iflib_get_ifp(aq_dev->ctx);
	i = 0;

	do {
		rx_desc = (aq_rx_desc_t *)&ring->rx_descs[cidx];

		trace_aq_rx_descr(ring->index, cidx,
		    (volatile uint64_t *)rx_desc);

		if ((rx_desc->wb.rx_stat & BIT(0)) != 0) {
			ring->stats.rx_err++;
			rc = EBADMSG;
			goto exit;
		}

		if (!rx_desc->wb.eop) {
			len = ring->rx_max_frame_size;
		} else {
			total_len = le32toh(rx_desc->wb.pkt_len);
			len = total_len & (ring->rx_max_frame_size - 1);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;

		if ((rx_desc->wb.pkt_type & 0x60) != 0) {
			ri->iri_flags |= M_VLANTAG;
			ri->iri_vtag = le32toh(rx_desc->wb.vlan);
		}

		i++;
		cidx = aq_next(cidx, ring->rx_size - 1);
	} while (!rx_desc->wb.eop);

	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
		aq_rx_set_cso_flags(rx_desc, ri);
	}
	ri->iri_rsstype = bsd_rss_type[rx_desc->wb.rss_type & 0xF];
	if (ri->iri_rsstype != M_HASHTYPE_NONE) {
		ri->iri_flowid = le32toh(rx_desc->wb.rss_hash);
	}

	ri->iri_len = total_len;
	ri->iri_nfrags = i;

	ring->stats.rx_bytes += total_len;
	ring->stats.rx_pkts++;

exit:
	AQ_DBG_EXIT(rc);
	return (rc);
}

/*****************************************************************************/
/*                                                                           */
/*****************************************************************************/

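/*
 * Fill in the per-packet command bits of a Tx descriptor: FCS insertion,
 * IPv4/L4 checksum offload, write-back on completion, plus any bits
 * carried over from the context-descriptor setup in aq_ring_tso_setup().
 */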
static void
aq_setup_offloads(aq_dev_t *aq_dev, if_pkt_info_t pi, aq_tx_desc_t *txd,
    uint32_t tx_cmd)
{
	AQ_DBG_ENTER();
	txd->cmd |= tx_desc_cmd_fcs;
	txd->cmd |= (pi->ipi_csum_flags & (CSUM_IP | CSUM_TSO)) ?
	    tx_desc_cmd_ipv4 : 0;
	txd->cmd |= (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP |
	    CSUM_IP_UDP | CSUM_IP6_UDP)) ? tx_desc_cmd_l4cs : 0;
	txd->cmd |= (pi->ipi_flags & IPI_TX_INTR) ? tx_desc_cmd_wb : 0;
	txd->cmd |= tx_cmd;
	AQ_DBG_EXIT(0);
}

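/*
 * Prepare the optional Tx context descriptor for TSO and/or VLAN tag
 * insertion.  Returns the command bits that the subsequent data
 * descriptors must carry, or 0 if no context descriptor is needed.
 */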
static int
aq_ring_tso_setup(aq_dev_t *aq_dev, if_pkt_info_t pi, uint32_t *hdrlen,
    aq_txc_desc_t *txc)
{
	uint32_t tx_cmd = 0;

	AQ_DBG_ENTER();
	if (pi->ipi_csum_flags & CSUM_TSO) {
		AQ_DBG_PRINT("aq_tso_setup(): TSO enabled");
		tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;

		if (pi->ipi_ipproto != IPPROTO_TCP) {
			AQ_DBG_PRINT("aq_tso_setup: not TCP");
			AQ_DBG_EXIT(0);
			return (0);
		}

		txc->cmd = 0x4; /* TCP */

		if (pi->ipi_csum_flags & CSUM_IP6_TCP)
			txc->cmd |= 0x2;

		txc->l2_len = pi->ipi_ehdrlen;
		txc->l3_len = pi->ipi_ip_hlen;
		txc->l4_len = pi->ipi_tcp_hlen;
		txc->mss_len = pi->ipi_tso_segsz;
		*hdrlen = txc->l2_len + txc->l3_len + txc->l4_len;
	}

	// Set VLAN tag
	if (pi->ipi_mflags & M_VLANTAG) {
		tx_cmd |= tx_desc_cmd_vlan;
		txc->vlan_tag = htole16(pi->ipi_vtag);
	}

	if (tx_cmd) {
		txc->type = tx_desc_type_ctx;
		txc->idx = 0;
	}

	AQ_DBG_EXIT(tx_cmd);
	return (tx_cmd);
}

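/*
 * iflib ift_txd_encap callback: place one packet into the Tx ring.  When
 * TSO or VLAN offload is requested, the first slot is consumed by a
 * context descriptor; each DMA segment then gets a data descriptor, and
 * the last one is marked EOP.
 */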
static int
aq_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring;
	aq_txc_desc_t *txc;
	aq_tx_desc_t *txd = NULL;
	bus_dma_segment_t *segs;
	qidx_t pidx;
	uint32_t hdrlen = 0, pay_len;
	uint8_t tx_cmd = 0;
	int i, desc_count = 0;

	AQ_DBG_ENTERA("[%d] start=%d", pi->ipi_qsidx, pi->ipi_pidx);
	ring = aq_dev->tx_rings[pi->ipi_qsidx];

	segs = pi->ipi_segs;
	pidx = pi->ipi_pidx;
	txc = (aq_txc_desc_t *)&ring->tx_descs[pidx];
	AQ_DBG_PRINT("txc at 0x%p, txd at 0x%p len %d", txc, txd, pi->ipi_len);

	pay_len = pi->ipi_len;

	txc->flags1 = 0U;
	txc->flags2 = 0U;

	tx_cmd = aq_ring_tso_setup(aq_dev, pi, &hdrlen, txc);
	AQ_DBG_PRINT("tx_cmd = 0x%x", tx_cmd);

	if (tx_cmd) {
		trace_aq_tx_context_descr(ring->index, pidx,
		    (volatile void *)txc);
		/* We've consumed the first desc, adjust counters */
		pidx = aq_next(pidx, ring->tx_size - 1);

		txd = &ring->tx_descs[pidx];
		txd->flags = 0U;
	} else {
		txd = (aq_tx_desc_t *)txc;
	}
	AQ_DBG_PRINT("txc at 0x%p, txd at 0x%p", txc, txd);

	txd->ct_en = !!tx_cmd;

	txd->type = tx_desc_type_desc;

	aq_setup_offloads(aq_dev, pi, txd, tx_cmd);

	if (tx_cmd) {
		txd->ct_idx = 0;
	}

	pay_len -= hdrlen;

	txd->pay_len = pay_len;

	AQ_DBG_PRINT("num_frag[%d] pay_len[%d]", pi->ipi_nsegs, pay_len);
	for (i = 0; i < pi->ipi_nsegs; i++) {
		if (desc_count > 0) {
			txd = &ring->tx_descs[pidx];
			txd->flags = 0U;
		}

		txd->buf_addr = htole64(segs[i].ds_addr);

		txd->type = tx_desc_type_desc;
		txd->len = segs[i].ds_len;
		txd->pay_len = pay_len;
		if (i < pi->ipi_nsegs - 1)
			trace_aq_tx_descr(ring->index, pidx,
			    (volatile void *)txd);

		pidx = aq_next(pidx, ring->tx_size - 1);

		desc_count++;
	}
	// The last descriptor requires EOP and WB
	txd->eop = 1U;

	AQ_DBG_DUMP_DESC(txd);
	trace_aq_tx_descr(ring->index, pidx, (volatile void *)txd);
	ring->tx_tail = pidx;

	ring->stats.tx_pkts++;
	ring->stats.tx_bytes += pay_len;

	pi->ipi_new_pidx = pidx;

	AQ_DBG_EXIT(0);
	return (0);
}

static void
aq_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring = aq_dev->tx_rings[txqid];

	AQ_DBG_ENTERA("[%d] tail=%d", ring->index, pidx);

	// Update the write pointer - submits the packet for transmission
	aq_ring_tx_tail_update(&aq_dev->hw, ring, pidx);
	AQ_DBG_EXIT(0);
}

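/*
 * Modular distance from index b to index a in a ring of the given size:
 * how far the hardware head (a) has advanced past the driver's last-seen
 * head (b).  Callers must handle a == b separately, since this returns
 * size (a full ring) in that case.
 */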
static inline unsigned int
aq_avail_desc(int a, int b, int size)
{
	return ((b >= a) ? (size - b + a) : (a - b));
}

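/*
 * iflib ift_txd_credits_update callback: report how many Tx descriptors
 * the hardware has completed since the last call.  With clear set, the
 * software head is advanced so those credits are only reported once.
 */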
static int
aq_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring = aq_dev->tx_rings[txqid];
	uint32_t head;
	int avail;

	AQ_DBG_ENTERA("[%d] clear=%d", ring->index, clear);
	avail = 0;
	head = tdm_tx_desc_head_ptr_get(&aq_dev->hw, ring->index);
	AQ_DBG_PRINT("swhead %d hwhead %d", ring->tx_head, head);

	if (ring->tx_head == head)
		goto done;

	avail = aq_avail_desc(head, ring->tx_head, ring->tx_size);
	if (clear)
		ring->tx_head = head;

done:
	AQ_DBG_EXIT(avail);
	return (avail);
}