/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2018 aQuantia Corporation. All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   (1) Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 *   (2) Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 *   (3) The name of the author may not be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <machine/param.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/bitstring.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/ethernet.h>
#include <net/iflib.h>
#include <netinet/in.h>

#include "aq_common.h"

#include "aq_ring.h"
#include "aq_dbg.h"
#include "aq_device.h"
#include "aq_hw.h"
#include "aq_hw_llh.h"

/* iflib txrx interface prototypes */
static int aq_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void aq_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int aq_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);
static void aq_ring_rx_refill(void *arg, if_rxd_update_t iru);
static void aq_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx);
static int aq_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget);
static int aq_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

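/*
 * Table of iflib transmit/receive callbacks for this driver.  iflib
 * invokes these for descriptor encap, doorbell flushes, completion
 * credit accounting, and packet reception on the aQuantia rings.
 */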
struct if_txrx aq_txrx = {
	.ift_txd_encap = aq_isc_txd_encap,
	.ift_txd_flush = aq_isc_txd_flush,
	.ift_txd_credits_update = aq_isc_txd_credits_update,
	.ift_rxd_available = aq_isc_rxd_available,
	.ift_rxd_pkt_get = aq_isc_rxd_pkt_get,
	.ift_rxd_refill = aq_ring_rx_refill,
	.ift_rxd_flush = aq_isc_rxd_flush,
	.ift_legacy_intr = NULL
};

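/* Advance a ring index by one, wrapping from 'lim' back to zero. */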
static inline uint32_t
aq_next(uint32_t i, uint32_t lim)
{
    return (i == lim) ? 0 : i + 1;
}

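/*
 * Program one hardware Rx descriptor ring: base address, length and
 * buffer size, interrupt mapping; header splitting, VLAN stripping and
 * DCA are disabled.  The ring itself is left disabled here;
 * aq_ring_rx_start() enables it.
 */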
int aq_ring_rx_init(struct aq_hw *hw, struct aq_ring *ring)
/*                     uint64_t ring_addr,
                     u32 ring_size,
                     u32 ring_idx,
                     u32 interrupt_cause,
                     u32 cpu_idx) */
{
    int err;
    u32 dma_desc_addr_lsw = (u32)ring->rx_descs_phys & 0xffffffff;
    u32 dma_desc_addr_msw = (u32)(ring->rx_descs_phys >> 32);

    AQ_DBG_ENTERA("[%d]", ring->index);

    rdm_rx_desc_en_set(hw, false, ring->index);

    rdm_rx_desc_head_splitting_set(hw, 0U, ring->index);

    reg_rx_dma_desc_base_addresslswset(hw, dma_desc_addr_lsw, ring->index);

    reg_rx_dma_desc_base_addressmswset(hw, dma_desc_addr_msw, ring->index);

    rdm_rx_desc_len_set(hw, ring->rx_size / 8U, ring->index);

    device_printf(ring->dev->dev, "ring %d: PAGE_SIZE=%d MCLBYTES=%d rx_max_frame_size=%d\n",
				  ring->index, PAGE_SIZE, MCLBYTES, ring->rx_max_frame_size);
    rdm_rx_desc_data_buff_size_set(hw, ring->rx_max_frame_size / 1024U, ring->index);

    rdm_rx_desc_head_buff_size_set(hw, 0U, ring->index);
    rdm_rx_desc_head_splitting_set(hw, 0U, ring->index);
    rpo_rx_desc_vlan_stripping_set(hw, 0U, ring->index);

    /* Rx ring set mode */

    /* Mapping interrupt vector */
    itr_irq_map_rx_set(hw, ring->msix, ring->index);
    itr_irq_map_en_rx_set(hw, true, ring->index);

    rdm_cpu_id_set(hw, 0, ring->index);
    rdm_rx_desc_dca_en_set(hw, 0U, ring->index);
    rdm_rx_head_dca_en_set(hw, 0U, ring->index);
    rdm_rx_pld_dca_en_set(hw, 0U, ring->index);

    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

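/*
 * Program one hardware Tx descriptor ring: base address, length,
 * write-back threshold and interrupt mapping.  The ring is left
 * disabled here; aq_ring_tx_start() enables it.
 */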
int aq_ring_tx_init(struct aq_hw *hw, struct aq_ring *ring)
/*                     uint64_t ring_addr,
                     u32 ring_size,
                     u32 ring_idx,
                     u32 interrupt_cause,
                     u32 cpu_idx) */
{
    int err;
    u32 dma_desc_addr_lsw = (u32)ring->tx_descs_phys & 0xffffffff;
    u32 dma_desc_addr_msw = (u32)(ring->tx_descs_phys >> 32);

    AQ_DBG_ENTERA("[%d]", ring->index);

    tdm_tx_desc_en_set(hw, 0U, ring->index);

    reg_tx_dma_desc_base_addresslswset(hw, dma_desc_addr_lsw, ring->index);

    reg_tx_dma_desc_base_addressmswset(hw, dma_desc_addr_msw, ring->index);

    tdm_tx_desc_len_set(hw, ring->tx_size / 8U, ring->index);

    aq_ring_tx_tail_update(hw, ring, 0U);

    /* Set Tx threshold */
    tdm_tx_desc_wr_wb_threshold_set(hw, 0U, ring->index);

    /* Mapping interrupt vector */
    itr_irq_map_tx_set(hw, ring->msix, ring->index);
    itr_irq_map_en_tx_set(hw, true, ring->index);

    tdm_cpu_id_set(hw, 0, ring->index);
    tdm_tx_desc_dca_en_set(hw, 0U, ring->index);

    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

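/* Write the Tx tail (doorbell) pointer for a ring. */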
int aq_ring_tx_tail_update(struct aq_hw *hw, struct aq_ring *ring, u32 tail)
{
    AQ_DBG_ENTERA("[%d]", ring->index);
    reg_tx_dma_desc_tail_ptr_set(hw, tail, ring->index);
    AQ_DBG_EXIT(0);
    return (0);
}

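/* Enable/disable helpers for the Tx and Rx DMA descriptor engines. */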
int aq_ring_tx_start(struct aq_hw *hw, struct aq_ring *ring)
{
    int err;

    AQ_DBG_ENTERA("[%d]", ring->index);
    tdm_tx_desc_en_set(hw, 1U, ring->index);
    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

int aq_ring_rx_start(struct aq_hw *hw, struct aq_ring *ring)
{
    int err;

    AQ_DBG_ENTERA("[%d]", ring->index);
    rdm_rx_desc_en_set(hw, 1U, ring->index);
    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

int aq_ring_tx_stop(struct aq_hw *hw, struct aq_ring *ring)
{
    int err;

    AQ_DBG_ENTERA("[%d]", ring->index);
    tdm_tx_desc_en_set(hw, 0U, ring->index);
    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

int aq_ring_rx_stop(struct aq_hw *hw, struct aq_ring *ring)
{
    int err;

    AQ_DBG_ENTERA("[%d]", ring->index);
    rdm_rx_desc_en_set(hw, 0U, ring->index);
    /* Invalidate Descriptor Cache to prevent writing to the cached
     * descriptors and to the data pointer of those descriptors
     */
    rdm_rx_dma_desc_cache_init_tgl(hw);
    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

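/*
 * iflib rxd_refill callback: post fresh receive buffers.  iflib hands
 * us the physical addresses of pre-mapped clusters; we write them into
 * the "read" view of each descriptor so hardware can DMA packet data.
 */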
static void aq_ring_rx_refill(void *arg, if_rxd_update_t iru)
{
	aq_dev_t *aq_dev = arg;
	aq_rx_desc_t *rx_desc;
	struct aq_ring *ring;
	qidx_t i, pidx;

	AQ_DBG_ENTERA("ring=%d iru_pidx=%d iru_count=%d iru->iru_buf_size=%d",
				  iru->iru_qsidx, iru->iru_pidx, iru->iru_count, iru->iru_buf_size);

	ring = aq_dev->rx_rings[iru->iru_qsidx];
	pidx = iru->iru_pidx;

	for (i = 0; i < iru->iru_count; i++) {
		rx_desc = (aq_rx_desc_t *)&ring->rx_descs[pidx];
		rx_desc->read.buf_addr = htole64(iru->iru_paddrs[i]);
		rx_desc->read.hdr_addr = 0;

		pidx = aq_next(pidx, ring->rx_size - 1);
	}

	AQ_DBG_EXIT(0);
}

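/* iflib rxd_flush callback: publish the new Rx tail pointer to hardware. */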
static void aq_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
							 qidx_t pidx)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring = aq_dev->rx_rings[rxqid];

	AQ_DBG_ENTERA("[%d] tail=%u", ring->index, pidx);
	reg_rx_dma_desc_tail_ptr_set(&aq_dev->hw, pidx, ring->index);
	AQ_DBG_EXIT(0);
}

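/*
 * iflib rxd_available callback: count completed packets starting at
 * 'idx'.  A descriptor is done once hardware has set its DD bit, and
 * only descriptors carrying EOP count as whole packets.  For RSC (LRO)
 * chains, next_desp links to the next descriptor in the chain.
 */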
static int aq_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring = aq_dev->rx_rings[rxqid];
	aq_rx_desc_t *rx_desc = (aq_rx_desc_t *)ring->rx_descs;
	int cnt, i, iter;

	AQ_DBG_ENTERA("[%d] head=%u, budget %d", ring->index, idx, budget);

	for (iter = 0, cnt = 0, i = idx; iter < ring->rx_size && cnt <= budget;) {
		trace_aq_rx_descr(ring->index, i, (volatile u64 *)&rx_desc[i]);
		if (!rx_desc[i].wb.dd)
			break;

		if (rx_desc[i].wb.eop) {
			iter++;
			i = aq_next(i, ring->rx_size - 1);

			cnt++;
		} else {
			/* LRO/Jumbo: wait for the whole packet to be in the ring */
			if (rx_desc[i].wb.rsc_cnt) {
				i = rx_desc[i].wb.next_desp;
				iter++;
				continue;
			} else {
				iter++;
				i = aq_next(i, ring->rx_size - 1);
				continue;
			}
		}
	}

	AQ_DBG_EXIT(cnt);
	return (cnt);
}

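/*
 * Translate checksum-offload status bits from the Rx write-back
 * descriptor into mbuf csum flags for the stack.
 */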
static void aq_rx_set_cso_flags(aq_rx_desc_t *rx_desc, if_rxd_info_t ri)
{
	if ((rx_desc->wb.pkt_type & 0x3) == 0) { // IPv4
		if (rx_desc->wb.rx_cntl & BIT(0)) { // IPv4 csum checked
			ri->iri_csum_flags |= CSUM_IP_CHECKED;
			if (!(rx_desc->wb.rx_stat & BIT(1)))
				ri->iri_csum_flags |= CSUM_IP_VALID;
		}
	}
	if (rx_desc->wb.rx_cntl & BIT(1)) { // TCP/UDP csum checked
		ri->iri_csum_flags |= CSUM_L4_CALC;
		if (!(rx_desc->wb.rx_stat & BIT(2)) && // L4 csum error
			(rx_desc->wb.rx_stat & BIT(3))) {  // L4 csum valid
			ri->iri_csum_flags |= CSUM_L4_VALID;
			ri->iri_csum_data = htons(0xffff);
		}
	}
}

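/*
 * Map the hardware RSS type field (low 4 bits) to the corresponding
 * M_HASHTYPE_* value; unlisted types default to M_HASHTYPE_NONE (0).
 */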
static uint8_t bsd_rss_type[16] = {
	[AQ_RX_RSS_TYPE_IPV4] = M_HASHTYPE_RSS_IPV4,
	[AQ_RX_RSS_TYPE_IPV6] = M_HASHTYPE_RSS_IPV6,
	[AQ_RX_RSS_TYPE_IPV4_TCP] = M_HASHTYPE_RSS_TCP_IPV4,
	[AQ_RX_RSS_TYPE_IPV6_TCP] = M_HASHTYPE_RSS_TCP_IPV6,
	[AQ_RX_RSS_TYPE_IPV4_UDP] = M_HASHTYPE_RSS_UDP_IPV4,
	[AQ_RX_RSS_TYPE_IPV6_UDP] = M_HASHTYPE_RSS_UDP_IPV6,
};

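/*
 * iflib rxd_pkt_get callback: walk the descriptor chain of one
 * received packet, filling ri with the fragment list, total length,
 * VLAN tag, checksum-offload results and RSS hash.  Returns 0 on
 * success or EBADMSG if hardware flagged an error on any descriptor.
 */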
static int aq_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring = aq_dev->rx_rings[ri->iri_qsidx];
	aq_rx_desc_t *rx_desc;
	if_t ifp;
	int cidx, rc = 0, i;
	size_t len, total_len = 0;

	AQ_DBG_ENTERA("[%d] start=%d", ring->index, ri->iri_cidx);
	cidx = ri->iri_cidx;
	ifp = iflib_get_ifp(aq_dev->ctx);
	i = 0;

	do {
		rx_desc = (aq_rx_desc_t *)&ring->rx_descs[cidx];

		trace_aq_rx_descr(ring->index, cidx, (volatile u64 *)rx_desc);

		if ((rx_desc->wb.rx_stat & BIT(0)) != 0) { /* hardware rx error */
			ring->stats.rx_err++;
			rc = (EBADMSG);
			goto exit;
		}

		if (!rx_desc->wb.eop) {
			/* Intermediate fragments are always full-sized */
			len = ring->rx_max_frame_size;
		} else {
			total_len = le32toh(rx_desc->wb.pkt_len);
			len = total_len & (ring->rx_max_frame_size - 1);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;

		if ((rx_desc->wb.pkt_type & 0x60) != 0) {
			ri->iri_flags |= M_VLANTAG;
			ri->iri_vtag = le32toh(rx_desc->wb.vlan);
		}

		i++;
		cidx = aq_next(cidx, ring->rx_size - 1);
	} while (!rx_desc->wb.eop);

	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
		aq_rx_set_cso_flags(rx_desc, ri);
	}
	ri->iri_rsstype = bsd_rss_type[rx_desc->wb.rss_type & 0xF];
	if (ri->iri_rsstype != M_HASHTYPE_NONE) {
		ri->iri_flowid = le32toh(rx_desc->wb.rss_hash);
	}

	ri->iri_len = total_len;
	ri->iri_nfrags = i;

	ring->stats.rx_bytes += total_len;
	ring->stats.rx_pkts++;

exit:
	AQ_DBG_EXIT(rc);
	return (rc);
}

/*****************************************************************************/
/*                                                                           */
/*****************************************************************************/

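/*
 * Fill per-packet Tx descriptor command bits: FCS insertion, IPv4 and
 * L4 (TCP/UDP) checksum offload, and completion write-back when iflib
 * requested a Tx interrupt for this packet.
 */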
static void aq_setup_offloads(aq_dev_t *aq_dev, if_pkt_info_t pi, aq_tx_desc_t *txd, u32 tx_cmd)
{
    AQ_DBG_ENTER();
    txd->cmd |= tx_desc_cmd_fcs;
    txd->cmd |= (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO)) ? tx_desc_cmd_ipv4 : 0;
    txd->cmd |= (pi->ipi_csum_flags &
				 (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_IP_UDP | CSUM_IP6_UDP)
				) ? tx_desc_cmd_l4cs : 0;
    txd->cmd |= (pi->ipi_flags & IPI_TX_INTR) ? tx_desc_cmd_wb : 0;
    txd->cmd |= tx_cmd;
    AQ_DBG_EXIT(0);
}

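/*
 * Build the Tx context descriptor for TSO and/or VLAN insertion.
 * Returns the command bits the subsequent data descriptors must carry,
 * or 0 if no context descriptor is needed.  For TSO, *hdrlen receives
 * the combined L2+L3+L4 header length so it can be excluded from the
 * payload byte count.
 */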
static int aq_ring_tso_setup(aq_dev_t *aq_dev, if_pkt_info_t pi, uint32_t *hdrlen, aq_txc_desc_t *txc)
{
	uint32_t tx_cmd = 0;

	AQ_DBG_ENTER();
	if (pi->ipi_csum_flags & CSUM_TSO) {
		AQ_DBG_PRINT("aq_tso_setup(): TSO enabled");
		tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;

		if (pi->ipi_ipproto != IPPROTO_TCP) {
			AQ_DBG_PRINT("aq_tso_setup: not TCP");
			AQ_DBG_EXIT(0);
			return (0);
		}

		txc->cmd = 0x4; /* TCP */

		if (pi->ipi_csum_flags & CSUM_IP6_TCP)
		    txc->cmd |= 0x2; /* IPv6 */

		txc->l2_len = pi->ipi_ehdrlen;
		txc->l3_len = pi->ipi_ip_hlen;
		txc->l4_len = pi->ipi_tcp_hlen;
		txc->mss_len = pi->ipi_tso_segsz;
		*hdrlen = txc->l2_len + txc->l3_len + txc->l4_len;
	}

	// Set VLAN tag
	if (pi->ipi_mflags & M_VLANTAG) {
		tx_cmd |= tx_desc_cmd_vlan;
		txc->vlan_tag = htole16(pi->ipi_vtag);
	}

	if (tx_cmd) {
		txc->type = tx_desc_type_ctx;
		txc->idx = 0;
	}

	AQ_DBG_EXIT(tx_cmd);
	return (tx_cmd);
}

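/*
 * iflib txd_encap callback: translate one iflib packet into the
 * hardware Tx ring as a context descriptor (when TSO or VLAN is in
 * play) followed by one data descriptor per DMA segment, and report
 * the new producer index back in pi->ipi_new_pidx.
 */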
static int aq_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring;
	aq_txc_desc_t *txc;
	aq_tx_desc_t *txd = NULL;
	bus_dma_segment_t *segs;
	qidx_t pidx;
	uint32_t hdrlen = 0, pay_len;
	uint8_t tx_cmd = 0;
	int i, desc_count = 0;

	AQ_DBG_ENTERA("[%d] start=%d", pi->ipi_qsidx, pi->ipi_pidx);
	ring = aq_dev->tx_rings[pi->ipi_qsidx];

	segs = pi->ipi_segs;
	pidx = pi->ipi_pidx;
	txc = (aq_txc_desc_t *)&ring->tx_descs[pidx];
	AQ_DBG_PRINT("txc at 0x%p, txd at 0x%p len %d", txc, txd, pi->ipi_len);

	pay_len = pi->ipi_len;

	txc->flags1 = 0U;
	txc->flags2 = 0U;

	tx_cmd = aq_ring_tso_setup(aq_dev, pi, &hdrlen, txc);
	AQ_DBG_PRINT("tx_cmd = 0x%x", tx_cmd);

	if (tx_cmd) {
		trace_aq_tx_context_descr(ring->index, pidx, (volatile void *)txc);
		/* We've consumed the first desc, adjust counters */
		pidx = aq_next(pidx, ring->tx_size - 1);

		txd = &ring->tx_descs[pidx];
		txd->flags = 0U;
	} else {
		/* No context descriptor needed; reuse the slot */
		txd = (aq_tx_desc_t *)txc;
	}
	AQ_DBG_PRINT("txc at 0x%p, txd at 0x%p", txc, txd);

	txd->ct_en = !!tx_cmd;

	txd->type = tx_desc_type_desc;

	aq_setup_offloads(aq_dev, pi, txd, tx_cmd);

	if (tx_cmd) {
		txd->ct_idx = 0;
	}

	pay_len -= hdrlen;

	txd->pay_len = pay_len;

	AQ_DBG_PRINT("num_frag[%d] pay_len[%d]", pi->ipi_nsegs, pay_len);
	for (i = 0; i < pi->ipi_nsegs; i++) {
		if (desc_count > 0) {
			txd = &ring->tx_descs[pidx];
			txd->flags = 0U;
		}

		txd->buf_addr = htole64(segs[i].ds_addr);

		txd->type = tx_desc_type_desc;
		txd->len = segs[i].ds_len;
		txd->pay_len = pay_len;
		if (i < pi->ipi_nsegs - 1)
			trace_aq_tx_descr(ring->index, pidx, (volatile void *)txd);

		pidx = aq_next(pidx, ring->tx_size - 1);

		desc_count++;
	}
	// Last descriptor requires EOP and WB
	txd->eop = 1U;

	AQ_DBG_DUMP_DESC(txd);
	trace_aq_tx_descr(ring->index, pidx, (volatile void *)txd);
	ring->tx_tail = pidx;

	ring->stats.tx_pkts++;
	ring->stats.tx_bytes += pay_len;

	pi->ipi_new_pidx = pidx;

	AQ_DBG_EXIT(0);
	return (0);
}

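/*
 * iflib txd_flush callback: write the new Tx tail pointer, submitting
 * all queued descriptors up to pidx for transmission.
 */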
static void aq_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring = aq_dev->tx_rings[txqid];

	AQ_DBG_ENTERA("[%d] tail=%d", ring->index, pidx);

	// Update the write pointer - submits packet for transmission
	aq_ring_tx_tail_update(&aq_dev->hw, ring, pidx);
	AQ_DBG_EXIT(0);
}

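/*
 * Ring distance from index 'b' forward to index 'a' on a ring of
 * 'size' entries, accounting for wrap-around.
 */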
static inline unsigned int aq_avail_desc(int a, int b, int size)
{
    return (b >= a) ? (size - b + a) : (a - b);
}

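/*
 * iflib txd_credits_update callback: report how many Tx descriptors
 * hardware has completed since the last call.  When 'clear' is set,
 * the software head is advanced so those credits are consumed.
 */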
static int aq_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring = aq_dev->tx_rings[txqid];
	uint32_t head;
	int avail;

	AQ_DBG_ENTERA("[%d] clear=%d", ring->index, clear);
	avail = 0;
	head = tdm_tx_desc_head_ptr_get(&aq_dev->hw, ring->index);
	AQ_DBG_PRINT("swhead %d hwhead %d", ring->tx_head, head);

	if (ring->tx_head == head) {
		avail = 0; //ring->tx_size;
		goto done;
	}

	avail = aq_avail_desc(head, ring->tx_head, ring->tx_size);
	if (clear)
		ring->tx_head = head;

done:
	AQ_DBG_EXIT(avail);
	return (avail);
}